-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt | 6
-rw-r--r--  Documentation/driver-api/fpga/fpga-mgr.rst | 5
-rw-r--r--  Documentation/networking/00-INDEX | 2
-rw-r--r--  Documentation/networking/defza.txt | 57
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 8
-rw-r--r--  MAINTAINERS | 19
-rw-r--r--  Makefile | 10
-rw-r--r--  arch/arc/Kconfig | 2
-rw-r--r--  arch/arc/Makefile | 26
-rw-r--r--  arch/arc/kernel/process.c | 20
-rw-r--r--  arch/arm/boot/dts/imx53-qsb-common.dtsi | 11
-rw-r--r--  arch/arm/kernel/vmlinux.lds.h | 2
-rw-r--r--  arch/mips/include/asm/processor.h | 10
-rw-r--r--  arch/mips/kernel/process.c | 25
-rw-r--r--  arch/mips/kernel/setup.c | 48
-rw-r--r--  arch/mips/kernel/vdso.c | 18
-rw-r--r--  arch/mips/lib/memset.S | 4
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/lib/code-patching.c | 20
-rw-r--r--  arch/powerpc/mm/numa.c | 5
-rw-r--r--  arch/s390/include/asm/sclp.h | 3
-rw-r--r--  arch/s390/kernel/early_printk.c | 2
-rw-r--r--  arch/s390/kernel/swsusp.S | 8
-rw-r--r--  arch/sparc/kernel/auxio_64.c | 4
-rw-r--r--  arch/sparc/kernel/kgdb_32.c | 2
-rw-r--r--  arch/sparc/kernel/kgdb_64.c | 2
-rw-r--r--  arch/sparc/kernel/power.c | 4
-rw-r--r--  arch/sparc/kernel/prom_32.c | 26
-rw-r--r--  arch/sparc/kernel/prom_64.c | 68
-rw-r--r--  arch/sparc/kernel/viohs.c | 12
-rw-r--r--  arch/sparc/vdso/Makefile | 8
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.h | 6
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 20
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 36
-rw-r--r--  arch/x86/mm/pgtable.c | 10
-rw-r--r--  drivers/atm/fore200e.c | 401
-rw-r--r--  drivers/atm/fore200e.h | 8
-rw-r--r--  drivers/base/firmware_loader/main.c | 7
-rw-r--r--  drivers/bluetooth/hci_qca.c | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 8
-rw-r--r--  drivers/fpga/dfl-fme-region.c | 4
-rw-r--r--  drivers/fpga/fpga-bridge.c | 2
-rw-r--r--  drivers/fpga/of-fpga-region.c | 3
-rw-r--r--  drivers/gpio/gpiolib.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 15
-rw-r--r--  drivers/hv/connection.c | 8
-rw-r--r--  drivers/hwmon/npcm750-pwm-fan.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-isch.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-scmi.c | 1
-rw-r--r--  drivers/infiniband/core/verbs.c | 46
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 23
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 132
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 19
-rw-r--r--  drivers/input/evdev.c | 1
-rw-r--r--  drivers/input/joystick/xpad.c | 3
-rw-r--r--  drivers/input/misc/uinput.c | 1
-rw-r--r--  drivers/input/mousedev.c | 1
-rw-r--r--  drivers/input/serio/i8042.c | 29
-rw-r--r--  drivers/isdn/hisax/amd7930_fn.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 5
-rw-r--r--  drivers/md/dm-flakey.c | 2
-rw-r--r--  drivers/md/dm-integrity.c | 3
-rw-r--r--  drivers/md/dm-linear.c | 8
-rw-r--r--  drivers/md/dm.c | 27
-rw-r--r--  drivers/mmc/core/block.c | 10
-rw-r--r--  drivers/mux/adgs1408.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 14
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 425
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 302
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 71
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_common_defs.h | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 285
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 72
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h | 229
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 468
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 42
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 206
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1671
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 250
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 112
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 310
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c | 114
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 101
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 58
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 366
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 19
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Makefile | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 485
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 65
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 186
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 303
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 211
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 1637
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 158
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 194
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 441
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 160
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 126
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 581
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.c | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.h | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/fw.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 39
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 49
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 57
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 179
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 65
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_abi.h | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c | 39
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app_nic.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 51
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_hwmon.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nic/main.c | 34
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 121
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 37
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 5
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 59
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 12
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 8
-rw-r--r--  drivers/net/fddi/Kconfig | 11
-rw-r--r--  drivers/net/fddi/Makefile | 1
-rw-r--r--  drivers/net/fddi/defza.c | 1564
-rw-r--r--  drivers/net/fddi/defza.h | 791
-rw-r--r--  drivers/net/fddi/skfp/h/cmtdef.h | 8
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 15
-rw-r--r--  drivers/net/phy/phy.c | 95
-rw-r--r--  drivers/net/phy/sfp.c | 2
-rw-r--r--  drivers/net/tun.c | 37
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/veth.c | 175
-rw-r--r--  drivers/net/virtio_net.c | 49
-rw-r--r--  drivers/net/vxlan.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/Makefile | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 76
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.c | 1019
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.h | 129
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c | 2072
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h | 677
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 267
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-ops.h | 21
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 187
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 254
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 57
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.c | 168
-rw-r--r--  drivers/net/wireless/ath/ath9k/antenna.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-spectral.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 24
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 18
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 14
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 26
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h | 30
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 64
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 54
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 115
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 837
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 420
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 29
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 11
-rw-r--r--  drivers/net/wireless/marvell/libertas/if_cs.c | 4
-rw-r--r--  drivers/net/wireless/marvell/libertas/if_sdio.c | 8
-rw-r--r--  drivers/net/wireless/marvell/libertas/if_spi.c | 4
-rw-r--r--  drivers/net/wireless/marvell/libertas/if_usb.c | 7
-rw-r--r--  drivers/net/wireless/marvell/libertas/main.c | 17
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mmio.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/dma.h | 126
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 55
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/init.c | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 22
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 49
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | 311
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02.h | 25
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c | 33
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h | 37
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 206
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mac.h | 31
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c | 74
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h | 14
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_phy.c | 167
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_phy.h | 39
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_regs.h | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c | 29
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb.h | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 20
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c | 27
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 120
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 80
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h | 23
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/init.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | 18
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c | 100
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/phy.c | 61
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c | 11
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c | 18
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c | 32
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 11
-rw-r--r--  drivers/net/wireless/quantenna/Kconfig | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/Kconfig | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h | 22
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h | 245
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 71
-rw-r--r--  drivers/of/unittest.c | 26
-rw-r--r--  drivers/pci/controller/pcie-cadence.c | 4
-rw-r--r--  drivers/phy/mscc/phy-ocelot-serdes.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 13
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 2
-rw-r--r--  drivers/s390/char/sclp_early_core.c | 11
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 2
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 24
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c | 6
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 4
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 5
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 159
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 26
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 30
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 47
-rw-r--r--  drivers/sbus/char/openprom.c | 11
-rw-r--r--  drivers/sbus/char/oradax.c | 3
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 1
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c | 11
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 11
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c | 8
-rw-r--r--  drivers/thunderbolt/icm.c | 49
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 4
-rw-r--r--  drivers/tty/serial/qcom_geni_serial.c | 4
-rw-r--r--  drivers/tty/serial/sh-sci.c | 56
-rw-r--r--  drivers/usb/class/cdc-acm.c | 6
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 4
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/serial/option.c | 15
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r--  drivers/video/fbdev/aty/atyfb.h | 3
-rw-r--r--  drivers/video/fbdev/aty/atyfb_base.c | 7
-rw-r--r--  drivers/video/fbdev/aty/mach64_ct.c | 10
-rw-r--r--  fs/afs/cell.c | 17
-rw-r--r--  fs/afs/dynroot.c | 2
-rw-r--r--  fs/afs/internal.h | 4
-rw-r--r--  fs/afs/main.c | 2
-rw-r--r--  fs/afs/proc.c | 7
-rw-r--r--  fs/afs/server.c | 2
-rw-r--r--  fs/gfs2/bmap.c | 4
-rw-r--r--  fs/xfs/xfs_reflink.c | 200
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 6
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/fpga/fpga-mgr.h | 20
-rw-r--r--  include/linux/gpio/driver.h | 7
-rw-r--r--  include/linux/ieee80211.h | 1
-rw-r--r--  include/linux/mlx5/driver.h | 14
-rw-r--r--  include/linux/mmzone.h | 4
-rw-r--r--  include/linux/netdevice.h | 8
-rw-r--r--  include/linux/qcom_scm.h | 4
-rw-r--r--  include/linux/serial_sci.h | 1
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/tcp.h | 1
-rw-r--r--  include/net/cfg80211.h | 62
-rw-r--r--  include/net/devlink.h | 12
-rw-r--r--  include/net/ip6_fib.h | 3
-rw-r--r--  include/net/ip_fib.h | 1
-rw-r--r--  include/net/ipv6.h | 2
-rw-r--r--  include/net/mac80211.h | 43
-rw-r--r--  include/net/neighbour.h | 1
-rw-r--r--  include/net/netlink.h | 2
-rw-r--r--  include/net/netns/ipv6.h | 1
-rw-r--r--  include/net/sock.h | 4
-rw-r--r--  include/rdma/ib_verbs.h | 30
-rw-r--r--  include/soc/fsl/bman.h | 8
-rw-r--r--  include/soc/fsl/qman.h | 8
-rw-r--r--  include/trace/events/rxrpc.h | 1
-rw-r--r--  include/uapi/linux/if_fddi.h | 21
-rw-r--r--  include/uapi/linux/if_link.h | 1
-rw-r--r--  include/uapi/linux/ncsi.h | 6
-rw-r--r--  include/uapi/linux/nl80211.h | 98
-rw-r--r--  include/uapi/linux/smc_diag.h | 25
-rw-r--r--  include/uapi/linux/udp.h | 1
-rw-r--r--  kernel/cgroup/cgroup.c | 25
-rw-r--r--  kernel/power/suspend.c | 6
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/bch.c | 17
-rw-r--r--  lib/vsprintf.c | 2
-rw-r--r--  lib/xz/xz_crc32.c | 1
-rw-r--r--  lib/xz/xz_private.h | 4
-rw-r--r--  mm/page_alloc.c | 10
-rw-r--r--  mm/percpu.c | 1
-rw-r--r--  net/bridge/br_netlink.c | 14
-rw-r--r--  net/bridge/br_private.h | 2
-rw-r--r--  net/bridge/br_sysfs_br.c | 17
-rw-r--r--  net/bridge/br_vlan.c | 53
-rw-r--r--  net/core/dev.c | 41
-rw-r--r--  net/core/devlink.c | 43
-rw-r--r--  net/core/filter.c | 4
-rw-r--r--  net/core/neighbour.c | 27
-rw-r--r--  net/core/rtnetlink.c | 10
-rw-r--r--  net/core/skbuff.c | 12
-rw-r--r--  net/core/sock.c | 9
-rw-r--r--  net/ipv4/arp.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 12
-rw-r--r--  net/ipv4/fib_semantics.c | 50
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/tcp.c | 10
-rw-r--r--  net/ipv4/tcp_bbr.c | 10
-rw-r--r--  net/ipv4/tcp_cdg.c | 2
-rw-r--r--  net/ipv4/tcp_dctcp.c | 55
-rw-r--r--  net/ipv4/tcp_dctcp.h | 40
-rw-r--r--  net/ipv4/tcp_output.c | 72
-rw-r--r--  net/ipv4/tcp_timer.c | 2
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 22
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/route.c | 22
-rw-r--r--  net/mac80211/Kconfig | 17
-rw-r--r--  net/mac80211/Makefile | 11
-rw-r--r--  net/mac80211/cfg.c | 85
-rw-r--r--  net/mac80211/driver-ops.h | 16
-rw-r--r--  net/mac80211/ieee80211_i.h | 1
-rw-r--r--  net/mac80211/main.c | 11
-rw-r--r--  net/mac80211/mlme.c | 107
-rw-r--r--  net/mac80211/rate.h | 13
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 162
-rw-r--r--  net/mac80211/rc80211_minstrel.h | 35
-rw-r--r--  net/mac80211/rc80211_minstrel_debugfs.c | 68
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 298
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.h | 20
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c | 58
-rw-r--r--  net/mac80211/rx.c | 12
-rw-r--r--  net/mac80211/status.c | 19
-rw-r--r--  net/mac80211/trace.h | 23
-rw-r--r--  net/mac80211/util.c | 8
-rw-r--r--  net/mpls/af_mpls.c | 36
-rw-r--r--  net/ncsi/internal.h | 7
-rw-r--r--  net/ncsi/ncsi-cmd.c | 8
-rw-r--r--  net/ncsi/ncsi-manage.c | 16
-rw-r--r--  net/ncsi/ncsi-netlink.c | 204
-rw-r--r--  net/ncsi/ncsi-netlink.h | 12
-rw-r--r--  net/ncsi/ncsi-rsp.c | 67
-rw-r--r--  net/rds/send.c | 13
-rw-r--r--  net/rxrpc/ar-internal.h | 24
-rw-r--r--  net/rxrpc/call_accept.c | 27
-rw-r--r--  net/rxrpc/call_object.c | 5
-rw-r--r--  net/rxrpc/conn_client.c | 10
-rw-r--r--  net/rxrpc/conn_event.c | 26
-rw-r--r--  net/rxrpc/input.c | 251
-rw-r--r--  net/rxrpc/local_object.c | 30
-rw-r--r--  net/rxrpc/net_ns.c | 3
-rw-r--r--  net/rxrpc/peer_event.c | 5
-rw-r--r--  net/rxrpc/peer_object.c | 29
-rw-r--r--  net/rxrpc/proc.c | 126
-rw-r--r--  net/sched/cls_u32.c | 6
-rw-r--r--  net/sched/sch_cake.c | 2
-rw-r--r--  net/sched/sch_fq.c | 22
-rw-r--r--  net/sched/sch_generic.c | 14
-rw-r--r--  net/tipc/link.c | 27
-rw-r--r--  net/tipc/socket.c | 14
-rw-r--r--  net/tipc/udp_media.c | 18
-rw-r--r--  net/wireless/core.c | 83
-rw-r--r--  net/wireless/core.h | 14
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c | 59
-rw-r--r--  net/wireless/lib80211_crypt_wep.c | 52
-rw-r--r--  net/wireless/nl80211.c | 830
-rw-r--r--  net/wireless/rdev-ops.h | 15
-rw-r--r--  net/wireless/reg.c | 9
-rw-r--r--  net/wireless/trace.h | 233
-rw-r--r--  samples/Kconfig | 1
-rw-r--r--  scripts/Makefile.build | 2
-rw-r--r--  tools/hv/hv_fcopy_daemon.c | 1
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py | 9
-rw-r--r--  tools/perf/scripts/python/export-to-sqlite.py | 6
-rw-r--r--  tools/perf/util/machine.c | 8
-rw-r--r--  tools/perf/util/setup.py | 2
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh | 0
-rw-r--r--  tools/testing/selftests/net/forwarding/lib.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/ip_defrag.sh | 8
-rwxr-xr-x  tools/testing/selftests/net/pmtu.sh | 69
-rwxr-xr-x  tools/testing/selftests/net/rtnetlink.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/udpgso_bench.sh | 2
514 files changed, 22702 insertions, 8639 deletions
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 7fd4e8ce4149..2196d1ab3c8c 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -56,6 +56,11 @@ Optional properties:
the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
optional "supply-name" is "vdd-0.8-cx-mx".
+- memory-region:
+ Usage: optional
+ Value type: <phandle>
+ Definition: reference to the reserved-memory for the msa region
+ used by the wifi firmware running in Q6.
Example (to supply the calibration data alone):
@@ -149,4 +154,5 @@ wifi@18000000 {
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ memory-region = <&wifi_msa_mem>;
};
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 4b3825da48d9..82b6dbbd31cd 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
API for programming an FPGA
---------------------------
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :doc: FPGA Manager flags
+
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_image_info
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index f4f2b5d6c8d8..2d239770b95f 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -56,6 +56,8 @@ de4x5.txt
- the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
decnet.txt
- info on using the DECnet networking layer in Linux.
+defza.txt
+ - the DEC FDDIcontroller 700 (DEFZA-xx) TURBOchannel FDDI driver
dl2k.txt
- README for D-Link DL2000-based Gigabit Ethernet Adapters (dl2k.ko).
dm9000.txt
diff --git a/Documentation/networking/defza.txt b/Documentation/networking/defza.txt
new file mode 100644
index 000000000000..663e4a906751
--- /dev/null
+++ b/Documentation/networking/defza.txt
@@ -0,0 +1,57 @@
+Notes on the DEC FDDIcontroller 700 (DEFZA-xx) driver v.1.1.4.
+
+
+DEC FDDIcontroller 700 is DEC's first-generation TURBOchannel FDDI
+network card, designed in 1990 specifically for the DECstation 5000
+model 200 workstation. The board is a single attachment station and
+it was manufactured in two variations, both of which are supported.
+
+First is the SAS MMF DEFZA-AA option, the original design implementing
+the standard MMF-PMD, however with a pair of ST connectors rather than
+the usual MIC connector. The other one is the SAS ThinWire/STP DEFZA-CA
+option, denoted 700-C, with the network medium selectable by a switch
+between the DEC proprietary ThinWire-PMD using a BNC connector and the
+standard STP-PMD using a DE-9F connector. This option can interface to
+a DECconcentrator 500 device and, in the case of the STP-PMD, also other
+FDDI equipment and was designed to make it easier to transition from
+existing IEEE 802.3 10BASE2 Ethernet and IEEE 802.5 Token Ring networks
+by providing means to reuse existing cabling.
+
+This driver handles any number of cards installed in a single system.
+They get fddi0, fddi1, etc. interface names assigned in the order of
+increasing TURBOchannel slot numbers.
+
+The board only supports DMA on the receive side. Transmission involves
+the use of PIO. As a result under a heavy transmission load there will
+be a significant impact on system performance.
+
+The board supports a 64-entry CAM for matching destination addresses.
+Two entries are preoccupied by the Directed Beacon and Ring Purger
+multicast addresses and the rest is used as a multicast filter. An
+all-multi mode is also supported for LLC frames and it is used if
+requested explicitly or if the CAM overflows. The promiscuous mode
+supports separate enables for LLC and SMT frames, but this driver
+doesn't support changing them individually.
+
+
+Known problems:
+
+None.
+
+
+To do:
+
+5. MAC address change. The card does not support changing the Media
+ Access Controller's address registers but a similar effect can be
+ achieved by adding an alias to the CAM. There is no way to disable
+ matching against the original address though.
+
+7. Queueing incoming/outgoing SMT frames in the driver if the SMT
+ receive/RMC transmit ring is full. (?)
+
+8. Retrieving/reporting FDDI/SNMP stats.
+
+
+Both success and failure reports are welcome.
+
+Maciej W. Rozycki <macro@linux-mips.org>
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 960de8fe3f40..163b5ff1073c 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1442,6 +1442,14 @@ max_hbh_length - INTEGER
header.
Default: INT_MAX (unlimited)
+skip_notify_on_dev_down - BOOLEAN
+ Controls whether an RTM_DELROUTE message is generated for routes
+ removed when a device is taken down or deleted. IPv4 does not
+ generate this message; IPv6 does by default. Setting this sysctl
+ to true skips the message, making IPv4 and IPv6 on par in relying
+ on userspace caches to track link events and evict routes.
+ Default: false (generate message)
+
IPv6 Fragmentation:
ip6frag_high_thresh - INTEGER
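
As an aside to the hunk above, a minimal user-space sketch of flipping the
new knob, assuming it is exposed as
/proc/sys/net/ipv6/route/skip_notify_on_dev_down (the procfs path itself is
not shown in this hunk):

	#include <stdio.h>

	int main(void)
	{
		/* assumed path; the hunk above only documents the semantics */
		FILE *f = fopen("/proc/sys/net/ipv6/route/skip_notify_on_dev_down", "w");

		if (!f)
			return 1;
		/* true: skip RTM_DELROUTE messages for routes removed on link down */
		fputs("1\n", f);
		return fclose(f) ? 1 : 0;
	}
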
diff --git a/MAINTAINERS b/MAINTAINERS
index 3356381bd1a9..7f1399ac028e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4170,6 +4170,11 @@ S: Maintained
F: drivers/platform/x86/dell-smbios-wmi.c
F: tools/wmi/dell-smbios-example.c
+DEFZA FDDI NETWORK DRIVER
+M: "Maciej W. Rozycki" <macro@linux-mips.org>
+S: Maintained
+F: drivers/net/fddi/defza.*
+
DELL LAPTOP DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
M: Pali Rohár <pali.rohar@gmail.com>
@@ -8857,6 +8862,15 @@ S: Supported
F: drivers/mmc/host/sdhci-xenon*
F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
+MARVELL OCTEONTX2 RVU ADMIN FUNCTION DRIVER
+M: Sunil Goutham <sgoutham@marvell.com>
+M: Linu Cherian <lcherian@marvell.com>
+M: Geetha sowjanya <gakula@marvell.com>
+M: Jerin Jacob <jerinj@marvell.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/marvell/octeontx2/af/
+
MATROX FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
S: Orphan
@@ -9681,7 +9695,8 @@ MIPS/LOONGSON2 ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com>
L: linux-mips@linux-mips.org
S: Maintained
-F: arch/mips/loongson64/*{2e/2f}*
+F: arch/mips/loongson64/fuloong-2e/
+F: arch/mips/loongson64/lemote-2f/
F: arch/mips/include/asm/mach-loongson64/
F: drivers/*/*loongson2*
F: drivers/*/*/*loongson2*
@@ -9888,7 +9903,7 @@ M: Peter Rosin <peda@axentia.se>
S: Maintained
F: Documentation/ABI/testing/sysfs-class-mux*
F: Documentation/devicetree/bindings/mux/
-F: include/linux/dt-bindings/mux/
+F: include/dt-bindings/mux/
F: include/linux/mux/
F: drivers/mux/
diff --git a/Makefile b/Makefile
index 6c3da3e10f07..e8b599b4dcde 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Merciless Moray
# *DOCUMENTATION*
@@ -483,13 +483,15 @@ endif
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
endif
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index b4441b0764d7..a045f3086047 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -149,7 +149,7 @@ config ARC_CPU_770
Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
This core has a bunch of cool new features:
-MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
- Shared Address Spaces (for sharing TLB entires in MMU)
+ Shared Address Spaces (for sharing TLB entries in MMU)
-Caches: New Prog Model, Region Flush
-Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 99cce77ab98f..644815c0516e 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,33 +6,11 @@
# published by the Free Software Foundation.
#
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
-
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
- $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
- $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
+cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
@@ -79,7 +57,7 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 4674541eba3f..8ce6e7235915 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
task_thread_info(current)->thr_ptr;
}
+
+ /*
+ * setup usermode thread pointer #1:
+ * when child is picked by scheduler, __switch_to() uses @c_callee to
+ * populate usermode callee regs: this works (despite being in a kernel
+ * function) since special return path for child @ret_from_fork()
+ * ensures those regs are not clobbered all the way to RTIE to usermode
+ */
+ c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+ * setup usermode thread pointer #2:
+ * however for this special use of r25 in kernel, __switch_to() sets
+ * r25 for kernel needs and only in the final return path is usermode
+ * r25 setup, from pt_regs->user_r25. So set that up as well
+ */
+ c_regs->user_r25 = c_callee->r25;
+#endif
+
return 0;
}
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 7423d462d1e4..50dde84b72ed 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -123,6 +123,17 @@
};
};
+&cpu0 {
+ /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+ operating-points = <
+ /* kHz uV */
+ 166666 850000
+ 400000 900000
+ 800000 1050000
+ 1000000 1200000
+ >;
+};
+
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
index ae5fdff18406..8247bc15addc 100644
--- a/arch/arm/kernel/vmlinux.lds.h
+++ b/arch/arm/kernel/vmlinux.lds.h
@@ -49,6 +49,8 @@
#define ARM_DISCARD \
*(.ARM.exidx.exit.text) \
*(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
ARM_EXIT_DISCARD(EXIT_TEXT) \
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b2fa62922d88..49d6046ca1d0 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
+#include <linux/sizes.h>
#include <linux/threads.h>
#include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
#endif
-/*
- * One page above the stack is used for branch delay slot "emulation".
- * See dsemul.c for details.
- */
-#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
+#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP mips_stack_top()
/*
* This decides where the kernel will search for a free chunk of vm
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8fc69891e117..d4f7fd4550e1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
#include <linux/nmi.h>
#include <linux/cpu.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
@@ -39,6 +40,7 @@
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
+#include <asm/mips-cps.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
@@ -645,6 +647,29 @@ out:
return pc;
}
+unsigned long mips_stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ /* One page for branch delay slot "emulation" */
+ top -= PAGE_SIZE;
+
+ /* Space for the VDSO, data page & GIC user page */
+ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+ top -= PAGE_SIZE;
+ top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+ /* Space for cache colour alignment */
+ if (cpu_has_dc_aliases)
+ top -= shm_align_mask + 1;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
/*
* Don't forget that the stack pointer must be aligned on an 8-byte
* boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c71d1eb7da59..8aaaa42f91ed 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
struct memblock_region *reg;
extern void plat_mem_setup(void);
+ /*
+ * Initialize boot_command_line to an innocuous but non-empty string in
+ * order to prevent early_init_dt_scan_chosen() from copying
+ * CONFIG_CMDLINE into it without our knowledge. We handle
+ * CONFIG_CMDLINE ourselves below & don't want to duplicate its
+ * content because repeating arguments can be problematic.
+ */
+ strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+ /* call board setup routine */
+ plat_mem_setup();
+
+ /*
+ * Make sure all kernel memory is in the maps. The "UP" and
+ * "DOWN" are opposite for initdata since if it crosses over
+ * into another memory section you don't want that to be
+ * freed when the initdata is freed.
+ */
+ arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+ PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+ BOOT_MEM_RAM);
+ arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+ PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+ BOOT_MEM_INIT_RAM);
+
+ pr_info("Determined physical RAM map:\n");
+ print_memory_map();
+
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
}
#endif
#endif
-
- /* call board setup routine */
- plat_mem_setup();
-
- /*
- * Make sure all kernel memory is in the maps. The "UP" and
- * "DOWN" are opposite for initdata since if it crosses over
- * into another memory section you don't want that to be
- * freed when the initdata is freed.
- */
- arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
- PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
- BOOT_MEM_RAM);
- arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
- PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
- BOOT_MEM_INIT_RAM);
-
- pr_info("Determined physical RAM map:\n");
- print_memory_map();
-
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 8f845f6e5f42..48a9c6b90e07 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
}
}
+static unsigned long vdso_base(void)
+{
+ unsigned long base;
+
+ /* Skip the delay slot emulation page */
+ base = STACK_TOP + PAGE_SIZE;
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (cpu_has_dc_aliases)
size += shm_align_mask + 1;
- base = get_unmapped_area(NULL, 0, size, 0, 0);
+ base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
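
A minimal user-space sketch of the vdso_base() arithmetic added above;
STACK_TOP and the other constants are hypothetical stand-ins for the
kernel's macros, and rand() stands in for get_random_int():

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE		4096UL
	#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define VDSO_RANDOMIZE_SIZE	(1UL << 20)	/* SZ_1M, the 32-bit case */

	int main(void)
	{
		unsigned long stack_top = 0x7f000000UL;	/* hypothetical STACK_TOP */
		unsigned long base = stack_top + PAGE_SIZE;	/* skip the delay slot page */

		/* random offset within the randomization window, then page-align */
		base += (unsigned long)rand() & (VDSO_RANDOMIZE_SIZE - 1);
		base = PAGE_ALIGN(base);

		printf("vDSO search base: %#lx\n", base);
		return 0;
	}
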
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 3a6f34ef5ffc..069acec3df9f 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -280,9 +280,11 @@
* unset_bytes = end_addr - current_addr + 1
* a2 = t1 - a0 + 1
*/
+ .set reorder
PTR_SUBU a2, t1, a0
+ PTR_ADDIU a2, 1
jr ra
- PTR_ADDIU a2, 1
+ .set noreorder
.endm
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 913c5725cdb2..bb6ac471a784 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
+ /*
+ * Make sure the NIP points at userspace, not kernel text/data or
+ * elsewhere.
+ */
+ if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+ pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+ current->comm, current->pid);
+ return;
+ }
+
pr_info("%s[%d]: code: ", current->comm, current->pid);
for (i = 0; i < instructions_to_print; i++) {
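
The check added above amounts to a range test on the dump window: it must
not wrap and must lie entirely below the user address limit. A stand-alone
sketch of that test, with a hypothetical limit in place of USER_DS:

	#include <stdbool.h>
	#include <stdint.h>

	#define USER_LIMIT	0x0010000000000000ULL	/* hypothetical user limit */

	/* the window [pc, pc + n_instrs * 4) must not wrap and must stay
	 * below the user address limit */
	static bool user_range_ok(uint64_t pc, unsigned int n_instrs)
	{
		uint64_t len = (uint64_t)n_instrs * 4;

		return pc + len >= pc && pc + len <= USER_LIMIT;
	}

	int main(void)
	{
		return user_range_ok(0xdeadbeef0000ULL, 16) ? 0 : 1;
	}
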
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 6ae2777c220d..5ffee298745f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
{
int err;
- /* Make sure we aren't patching a freed init section */
- if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
- pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
- return 0;
- }
-
__put_user_size(instr, patch_addr, 4, err);
if (err)
return err;
@@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
unsigned int *patch_addr = NULL;
@@ -188,12 +182,22 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+ /* Make sure we aren't patching a freed init section */
+ if (init_mem_is_free && init_section_contains(addr, 4)) {
+ pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+ return 0;
+ }
+ return do_patch_instruction(addr, instr);
+}
NOKPROBE_SYMBOL(patch_instruction);
int patch_branch(unsigned int *addr, unsigned long target, int flags)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 59d07bd5374a..055b211b7126 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
* Need to ensure that NODE_DATA is initialized for a node from
* available memory (see memblock_alloc_try_nid). If unable to
* init the node, then default to nearest node that has memory
- * installed.
+ * installed. Skip onlining a node if the subsystems are not
+ * yet initialized.
*/
- if (try_online_node(new_nid))
+ if (!topology_inited || try_online_node(new_nid))
new_nid = first_online_node;
#else
/*
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 3cae9168f63c..e44a8d7959f5 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
void sclp_early_printk(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_early_printk_force(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c
index 9431784d7796..40c1dfec944e 100644
--- a/arch/s390/kernel/early_printk.c
+++ b/arch/s390/kernel/early_printk.c
@@ -10,7 +10,7 @@
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
{
- __sclp_early_printk(s, len);
+ __sclp_early_printk(s, len, 0);
}
static struct console sclp_early_console = {
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index a049a7b9d6e8..c1a080b11ae9 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -198,12 +198,10 @@ pgm_check_entry:
/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
- ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+ aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+ aghi %r15,-STACK_FRAME_OVERHEAD
larl %r2,.Lpanic_string
- lghi %r1,0
- sam31
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE
- brasl %r14,sclp_early_printk
+ brasl %r14,sclp_early_printk_force
larl %r3,.Ldisabled_wait_31
lpsw 0(%r3)
4:
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 4e8f56c3793c..cc42225c20f3 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -115,8 +115,8 @@ static int auxio_probe(struct platform_device *dev)
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
- printk("auxio: Unknown parent bus type [%s]\n",
- dp->parent->name);
+ printk("auxio: Unknown parent bus type [%pOFn]\n",
+ dp->parent);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 5868fc333ea8..639c8e54530a 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fallthru */
+ /* fall through */
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index d5f7dc6323d5..a68bbddbdba4 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fallthru */
+ /* fall through */
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 92627abce311..d941875dd718 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op)
power_reg = of_ioremap(res, 0, 0x4, "power");
- printk(KERN_INFO "%s: Control reg at %llx\n",
- op->dev.of_node->name, res->start);
+ printk(KERN_INFO "%pOFn: Control reg at %llx\n",
+ op->dev.of_node, res->start);
if (has_button_interrupt(irq, op->dev.of_node)) {
if (request_irq(irq,
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index b51cbb9e87dc..17c87d29ff20 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -68,8 +68,8 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
return;
regs = rprop->value;
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
regs->which_io, regs->phys_addr);
}
@@ -84,8 +84,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
return;
regs = prop->value;
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
regs->which_io,
regs->phys_addr);
}
@@ -104,13 +104,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
devfn >> 3,
devfn & 0x07);
} else {
- sprintf(tmp_buf, "%s@%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x",
+ dp,
devfn >> 3);
}
}
@@ -127,8 +127,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
regs->which_io, regs->phys_addr);
}
@@ -167,8 +167,8 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
return;
device = prop->value;
- sprintf(tmp_buf, "%s:%d:%d@%x,%x",
- dp->name, *vendor, *device,
+ sprintf(tmp_buf, "%pOFn:%d:%d@%x,%x",
+ dp, *vendor, *device,
*intr, reg0);
}
@@ -201,7 +201,7 @@ char * __init build_path_component(struct device_node *dp)
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
- strcpy(tmp_buf, dp->name);
+ snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index baeaeed64993..6220411ce8fc 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -82,8 +82,8 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
regs = rprop->value;
if (!of_node_is_root(dp->parent)) {
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
@@ -97,17 +97,17 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
const char *prefix = (type == 0) ? "m" : "i";
if (low_bits)
- sprintf(tmp_buf, "%s@%s%x,%x",
- dp->name, prefix,
+ sprintf(tmp_buf, "%pOFn@%s%x,%x",
+ dp, prefix,
high_bits, low_bits);
else
- sprintf(tmp_buf, "%s@%s%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%s%x",
+ dp,
prefix,
high_bits);
} else if (type == 12) {
- sprintf(tmp_buf, "%s@%x",
- dp->name, high_bits);
+ sprintf(tmp_buf, "%pOFn@%x",
+ dp, high_bits);
}
}
@@ -122,8 +122,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
if (!of_node_is_root(dp->parent)) {
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
@@ -138,8 +138,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
if (tlb_type >= cheetah)
mask = 0x7fffff;
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
*(u32 *)prop->value,
(unsigned int) (regs->phys_addr & mask));
}
@@ -156,8 +156,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
return;
regs = prop->value;
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
regs->which_io,
regs->phys_addr);
}
@@ -176,13 +176,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
devfn >> 3,
devfn & 0x07);
} else {
- sprintf(tmp_buf, "%s@%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x",
+ dp,
devfn >> 3);
}
}
@@ -203,8 +203,8 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
if (!prop)
return;
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
*(u32 *) prop->value,
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
@@ -221,7 +221,7 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
- sprintf(tmp_buf, "%s@%x", dp->name, *regs);
+ sprintf(tmp_buf, "%pOFn@%x", dp, *regs);
}
/* "name@addrhi,addrlo" */
@@ -236,8 +236,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
@@ -257,8 +257,8 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
/* This actually isn't right... should look at the #address-cells
* property of the i2c bus node etc. etc.
*/
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name, regs[0], regs[1]);
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp, regs[0], regs[1]);
}
/* "name@reg0[,reg1]" */
@@ -274,11 +274,11 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
if (prop->length == sizeof(u32) || regs[1] == 1) {
- sprintf(tmp_buf, "%s@%x",
- dp->name, regs[0]);
+ sprintf(tmp_buf, "%pOFn@%x",
+ dp, regs[0]);
} else {
- sprintf(tmp_buf, "%s@%x,%x",
- dp->name, regs[0], regs[1]);
+ sprintf(tmp_buf, "%pOFn@%x,%x",
+ dp, regs[0], regs[1]);
}
}
@@ -295,11 +295,11 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf
regs = prop->value;
if (regs[2] || regs[3]) {
- sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
- dp->name, regs[0], regs[1], regs[2], regs[3]);
+ sprintf(tmp_buf, "%pOFn@%08x%08x,%04x%08x",
+ dp, regs[0], regs[1], regs[2], regs[3]);
} else {
- sprintf(tmp_buf, "%s@%08x%08x",
- dp->name, regs[0], regs[1]);
+ sprintf(tmp_buf, "%pOFn@%08x%08x",
+ dp, regs[0], regs[1]);
}
}
@@ -361,7 +361,7 @@ char * __init build_path_component(struct device_node *dp)
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
- strcpy(tmp_buf, dp->name);
+ snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index 635d67ffc9a3..7db5aabe9708 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
struct vio_dring_register pkt;
char all[sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
- dr->ncookies)];
+ VIO_MAX_RING_COOKIES)];
} u;
+ size_t bytes = sizeof(struct vio_dring_register) +
+ (sizeof(struct ldc_trans_cookie) *
+ dr->ncookies);
int i;
- memset(&u, 0, sizeof(u));
+ if (WARN_ON(bytes > sizeof(u)))
+ return -EINVAL;
+
+ memset(&u, 0, bytes);
init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
u.pkt.dring_ident = 0;
u.pkt.num_descr = dr->num_entries;
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
(unsigned long long) u.pkt.cookies[i].cookie_size);
}
- return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+ return send_ctrl(vio, &u.pkt.tag, bytes);
}
static int send_rdx(struct vio_driver_state *vio)
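
The fix above replaces a variable-length on-stack array with a worst-case
buffer plus an explicit bounds check before use. A stand-alone sketch of
that pattern, with hypothetical stand-ins for the VIO structures and
VIO_MAX_RING_COOKIES:

	#include <stdio.h>
	#include <string.h>

	#define MAX_COOKIES	2	/* stand-in for VIO_MAX_RING_COOKIES */

	struct cookie { unsigned long addr, size; };
	struct dring_reg {
		unsigned long ident;
		struct cookie cookies[MAX_COOKIES];	/* worst case */
	};

	static int build_dreg(unsigned int ncookies)
	{
		unsigned char buf[sizeof(struct dring_reg)];
		size_t bytes = sizeof(unsigned long) +
			       ncookies * sizeof(struct cookie);

		if (bytes > sizeof(buf))	/* the WARN_ON + -EINVAL path above */
			return -1;
		memset(buf, 0, bytes);		/* clear only what will be sent */
		return 0;
	}

	int main(void)
	{
		printf("2 cookies: %d, 9 cookies: %d\n",
		       build_dreg(2), build_dreg(9));
		return 0;
	}
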
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index dd0b5a92ffd0..dc85570d8839 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs)
targets += $(vdso_img_cfiles)
targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
-export CPPFLAGS_vdso.lds += -P -C
+CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-Wl,--no-undefined \
-Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
$(DISABLE_LTO)
-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
-define cmd_vdso2c
- $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+ cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
$(call if_changed,vdso2c)
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 285eb3ec4200..3736f6dc9545 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v);
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
- u32 _cbm, int closid, bool exclusive);
+ unsigned long cbm, int closid, bool exclusive);
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
- u32 cbm);
+ unsigned long cbm);
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
int rdtgroup_tasks_assigned(struct rdtgroup *r);
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
int rdt_pseudo_lock_init(void);
void rdt_pseudo_lock_release(void);
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index 40f3903ae5d9..f8c260d522ca 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -797,25 +797,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
/**
* rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
* @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
*
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
* pseudo-locked region on @d.
*
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
* otherwise.
*/
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
{
- unsigned long *cbm = (unsigned long *)&_cbm;
- unsigned long *cbm_b;
unsigned int cbm_len;
+ unsigned long cbm_b;
if (d->plr) {
cbm_len = d->plr->r->cache.cbm_len;
- cbm_b = (unsigned long *)&d->plr->cbm;
- if (bitmap_intersects(cbm, cbm_b, cbm_len))
+ cbm_b = d->plr->cbm;
+ if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
return true;
}
return false;
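The u32-to-unsigned-long widening matters because the bitmap_*() helpers operate on arrays of unsigned long: casting a (u32 *) to (unsigned long *) lets them read four bytes past the value on 64-bit kernels. A sketch of the safe idiom used above:

#include <linux/bitmap.h>
#include <linux/types.h>

static bool cbm_overlap(u32 a, u32 b, unsigned int cbm_len)
{
	/* copy the 32-bit values into full words first */
	unsigned long wa = a, wb = b;

	return bitmap_intersects(&wa, &wb, cbm_len);
}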
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 1b8e86a5d5e1..b140c68bc14b 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
* is false then overlaps with any resource group or hardware entities
* will be considered.
*
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
* Return: false if CBM does not overlap, true if it does.
*/
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
- u32 _cbm, int closid, bool exclusive)
+ unsigned long cbm, int closid, bool exclusive)
{
- unsigned long *cbm = (unsigned long *)&_cbm;
- unsigned long *ctrl_b;
enum rdtgrp_mode mode;
+ unsigned long ctrl_b;
u32 *ctrl;
int i;
/* Check for any overlap with regions used by hardware directly */
if (!exclusive) {
- if (bitmap_intersects(cbm,
- (unsigned long *)&r->cache.shareable_bits,
- r->cache.cbm_len))
+ ctrl_b = r->cache.shareable_bits;
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
return true;
}
/* Check for overlap with other resource groups */
ctrl = d->ctrl_val;
for (i = 0; i < closids_supported(); i++, ctrl++) {
- ctrl_b = (unsigned long *)ctrl;
+ ctrl_b = *ctrl;
mode = rdtgroup_mode_by_closid(i);
if (closid_allocated(i) && i != closid &&
mode != RDT_MODE_PSEUDO_LOCKSETUP) {
- if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
if (exclusive) {
if (mode == RDT_MODE_EXCLUSIVE)
return true;
@@ -1138,15 +1139,18 @@ out:
* computed by first dividing the total cache size by the CBM length to
* determine how many bytes each bit in the bitmask represents. The result
* is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
*/
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
- struct rdt_domain *d, u32 cbm)
+ struct rdt_domain *d, unsigned long cbm)
{
struct cpu_cacheinfo *ci;
unsigned int size = 0;
int num_b, i;
- num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+ num_b = bitmap_weight(&cbm, r->cache.cbm_len);
ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == r->cache_level) {
@@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
u32 used_b = 0, unused_b = 0;
u32 closid = rdtgrp->closid;
struct rdt_resource *r;
+ unsigned long tmp_cbm;
enum rdtgrp_mode mode;
struct rdt_domain *d;
int i, ret;
@@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
* modify the CBM based on system availability.
*/
cbm_ensure_valid(&d->new_ctrl, r);
- if (bitmap_weight((unsigned long *) &d->new_ctrl,
- r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure
+ * that bitmap_weight() does not access out-of-bound
+ * memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+ r->cache.min_cbm_bits) {
rdt_last_cmd_printf("no space on %s:%d\n",
r->name, d->id);
return -ENOSPC;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 089e78c4effd..59274e2c1ac4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)
#define UNSHARED_PTRS_PER_PGD \
(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD \
+ max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
* and initialize the kernel pmds here.
*/
#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD
/*
* We allocate separate PMDs for the kernel part of the user page-table
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
*/
#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
+#define MAX_PREALLOCATED_PMDS 0
#define PREALLOCATED_USER_PMDS 0
+#define MAX_PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
- pmd_t *pmds[PREALLOCATED_PMDS];
+ pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+ pmd_t *pmds[MAX_PREALLOCATED_PMDS];
pgd = _pgd_alloc();
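PREALLOCATED_PMDS expands to a static_cpu_has() expression, so using it as an array bound creates a variable-length array; the MAX_* macros give pgd_alloc() fixed-size arrays while the runtime count still limits how many slots are used. A reduced sketch (names illustrative):

#define MAX_SLOTS	16			/* compile-time worst case */

static void prealloc_demo(unsigned int count)	/* caller ensures count <= MAX_SLOTS */
{
	void *slots[MAX_SLOTS];			/* fixed size, no VLA */
	unsigned int i;

	for (i = 0; i < count; i++)		/* only 'count' slots are live */
		slots[i] = NULL;
}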
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 99a38115b0a8..f55ffde877b5 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -106,7 +106,6 @@
static const struct atmdev_ops fore200e_ops;
-static const struct fore200e_bus fore200e_bus[];
static LIST_HEAD(fore200e_boards);
@@ -183,10 +182,9 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i
alignment = 0;
chunk->alloc_size = size + alignment;
- chunk->align_size = size;
chunk->direction = direction;
- chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
+ chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
if (chunk->alloc_addr == NULL)
return -ENOMEM;
@@ -195,8 +193,12 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i
chunk->align_addr = chunk->alloc_addr + offset;
- chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
-
+ chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
+ size, direction);
+ if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
+ kfree(chunk->alloc_addr);
+ return -ENOMEM;
+ }
return 0;
}
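The open-coded dma_map_single() calls that replace the bus->dma_map() indirection are each paired with dma_mapping_error(), since a streaming mapping can fail and the cookie must not be handed to hardware unchecked. A minimal sketch of the pattern:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_for_device(struct device *dev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))	/* never use an unchecked cookie */
		return -ENOMEM;
	return 0;
}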
@@ -206,11 +208,39 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i
static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
- fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
-
+ dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
+ chunk->direction);
kfree(chunk->alloc_addr);
}
+/*
+ * Allocate a DMA consistent chunk of memory intended to act as a communication
+ * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
+ * and the adapter.
+ */
+static int
+fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
+ int size, int nbr, int alignment)
+{
+ /* returned chunks are page-aligned */
+ chunk->alloc_size = size * nbr;
+ chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
+ &chunk->dma_addr, GFP_KERNEL);
+ if (!chunk->alloc_addr)
+ return -ENOMEM;
+ chunk->align_addr = chunk->alloc_addr;
+ return 0;
+}
+
+/*
+ * Free a DMA consistent chunk of memory.
+ */
+static void
+fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
+{
+ dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
+ chunk->dma_addr);
+}
static void
fore200e_spin(int msecs)
@@ -303,10 +333,10 @@ fore200e_uninit_bs_queue(struct fore200e* fore200e)
struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
if (status->alloc_addr)
- fore200e->bus->dma_chunk_free(fore200e, status);
+ fore200e_dma_chunk_free(fore200e, status);
if (rbd_block->alloc_addr)
- fore200e->bus->dma_chunk_free(fore200e, rbd_block);
+ fore200e_dma_chunk_free(fore200e, rbd_block);
}
}
}
@@ -372,17 +402,17 @@ fore200e_shutdown(struct fore200e* fore200e)
/* fall through */
case FORE200E_STATE_INIT_RXQ:
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
/* fall through */
case FORE200E_STATE_INIT_TXQ:
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
/* fall through */
case FORE200E_STATE_INIT_CMDQ:
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
/* fall through */
case FORE200E_STATE_INITIALIZE:
@@ -429,81 +459,6 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
writel(cpu_to_le32(val), addr);
}
-
-static u32
-fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
-{
- u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction);
-
- DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
- virt_addr, size, direction, dma_addr);
-
- return dma_addr;
-}
-
-
-static void
-fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
-{
- DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
- dma_addr, size, direction);
-
- dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
-}
-
-
-static void
-fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
-{
- DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
-}
-
-static void
-fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
-{
- DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
-}
-
-
-/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
- (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
-
-static int
-fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
- int size, int nbr, int alignment)
-{
- /* returned chunks are page-aligned */
- chunk->alloc_size = size * nbr;
- chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
- chunk->alloc_size,
- &chunk->dma_addr,
- GFP_KERNEL);
-
- if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
- return -ENOMEM;
-
- chunk->align_addr = chunk->alloc_addr;
-
- return 0;
-}
-
-
-/* free a DMA consistent chunk of memory */
-
-static void
-fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
-{
- dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
- chunk->alloc_size,
- chunk->alloc_addr,
- chunk->dma_addr);
-}
-
-
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
@@ -571,7 +526,7 @@ fore200e_pca_unmap(struct fore200e* fore200e)
static int fore200e_pca_configure(struct fore200e *fore200e)
{
- struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
+ struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
u8 master_ctrl, latency;
DPRINTK(2, "device %s being configured\n", fore200e->name);
@@ -623,7 +578,10 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
opcode.opcode = OPCODE_GET_PROM;
opcode.pad = 0;
- prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
+ prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fore200e->dev, prom_dma))
+ return -ENOMEM;
fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
@@ -635,7 +593,7 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
*entry->status = STATUS_FREE;
- fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
+ dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
if (ok == 0) {
printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
@@ -658,15 +616,31 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
- struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
+ struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}
+static const struct fore200e_bus fore200e_pci_ops = {
+ .model_name = "PCA-200E",
+ .proc_name = "pca200e",
+ .descr_alignment = 32,
+ .buffer_alignment = 4,
+ .status_alignment = 32,
+ .read = fore200e_pca_read,
+ .write = fore200e_pca_write,
+ .configure = fore200e_pca_configure,
+ .map = fore200e_pca_map,
+ .reset = fore200e_pca_reset,
+ .prom_read = fore200e_pca_prom_read,
+ .unmap = fore200e_pca_unmap,
+ .irq_check = fore200e_pca_irq_check,
+ .irq_ack = fore200e_pca_irq_ack,
+ .proc_read = fore200e_pca_proc_read,
+};
#endif /* CONFIG_PCI */
-
#ifdef CONFIG_SBUS
static u32 fore200e_sba_read(volatile u32 __iomem *addr)
@@ -679,78 +653,6 @@ static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
sbus_writel(val, addr);
}
-static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
- u32 dma_addr;
-
- dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);
-
- DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
- virt_addr, size, direction, dma_addr);
-
- return dma_addr;
-}
-
-static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
- dma_addr, size, direction);
-
- dma_unmap_single(&op->dev, dma_addr, size, direction);
-}
-
-static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction);
-}
-
-static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_device(&op->dev, dma_addr, size, direction);
-}
-
-/* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
- * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter.
- */
-static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
- int size, int nbr, int alignment)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- chunk->alloc_size = chunk->align_size = size * nbr;
-
- /* returned chunks are page-aligned */
- chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size,
- &chunk->dma_addr, GFP_ATOMIC);
-
- if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
- return -ENOMEM;
-
- chunk->align_addr = chunk->alloc_addr;
-
- return 0;
-}
-
-/* free a DVMA consistent chunk of memory */
-static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- dma_free_coherent(&op->dev, chunk->alloc_size,
- chunk->alloc_addr, chunk->dma_addr);
-}
-
static void fore200e_sba_irq_enable(struct fore200e *fore200e)
{
u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
@@ -777,7 +679,7 @@ static void fore200e_sba_reset(struct fore200e *fore200e)
static int __init fore200e_sba_map(struct fore200e *fore200e)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
unsigned int bursts;
/* gain access to the SBA specific registers */
@@ -807,7 +709,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e)
static void fore200e_sba_unmap(struct fore200e *fore200e)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
@@ -823,7 +725,7 @@ static int __init fore200e_sba_configure(struct fore200e *fore200e)
static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
const u8 *prop;
int len;
@@ -847,7 +749,7 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_
static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
const struct linux_prom_registers *regs;
regs = of_get_property(op->dev.of_node, "reg", NULL);
@@ -855,8 +757,26 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
(regs ? regs->which_io : 0), op->dev.of_node->name);
}
-#endif /* CONFIG_SBUS */
+static const struct fore200e_bus fore200e_sbus_ops = {
+ .model_name = "SBA-200E",
+ .proc_name = "sba200e",
+ .descr_alignment = 32,
+ .buffer_alignment = 64,
+ .status_alignment = 32,
+ .read = fore200e_sba_read,
+ .write = fore200e_sba_write,
+ .configure = fore200e_sba_configure,
+ .map = fore200e_sba_map,
+ .reset = fore200e_sba_reset,
+ .prom_read = fore200e_sba_prom_read,
+ .unmap = fore200e_sba_unmap,
+ .irq_enable = fore200e_sba_irq_enable,
+ .irq_check = fore200e_sba_irq_check,
+ .irq_ack = fore200e_sba_irq_ack,
+ .proc_read = fore200e_sba_proc_read,
+};
+#endif /* CONFIG_SBUS */
static void
fore200e_tx_irq(struct fore200e* fore200e)
@@ -884,7 +804,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
kfree(entry->data);
/* remove DMA mapping */
- fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
+ dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
DMA_TO_DEVICE);
vc_map = entry->vc_map;
@@ -1105,12 +1025,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
/* Make device DMA transfer visible to CPU. */
- fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
+ rpd->rsd[i].length, DMA_FROM_DEVICE);
skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
/* Now let the device get at it again. */
- fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
+ rpd->rsd[i].length, DMA_FROM_DEVICE);
}
DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
@@ -1611,7 +1533,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
}
if (tx_copy) {
- data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
+ data = kmalloc(tx_len, GFP_ATOMIC);
if (data == NULL) {
if (vcc->pop) {
vcc->pop(vcc, skb);
@@ -1679,7 +1601,14 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
entry->data = tx_copy ? data : NULL;
tpd = entry->tpd;
- tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
+ tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
+ if (tx_copy)
+ kfree(data);
+ spin_unlock_irqrestore(&fore200e->q_lock, flags);
+ return -ENOMEM;
+ }
tpd->tsd[ 0 ].length = tx_len;
FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
@@ -1747,13 +1676,15 @@ fore200e_getstats(struct fore200e* fore200e)
u32 stats_dma_addr;
if (fore200e->stats == NULL) {
- fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
+ fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
if (fore200e->stats == NULL)
return -ENOMEM;
}
- stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
- sizeof(struct stats), DMA_FROM_DEVICE);
+ stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
+ sizeof(struct stats), DMA_FROM_DEVICE);
+ if (dma_mapping_error(fore200e->dev, stats_dma_addr))
+ return -ENOMEM;
FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
@@ -1770,7 +1701,7 @@ fore200e_getstats(struct fore200e* fore200e)
*entry->status = STATUS_FREE;
- fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
+ dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
if (ok == 0) {
printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
@@ -2049,7 +1980,7 @@ static int fore200e_irq_request(struct fore200e *fore200e)
static int fore200e_get_esi(struct fore200e *fore200e)
{
- struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
+ struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
int ok, i;
if (!prom)
@@ -2156,7 +2087,7 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e)
bsq = &fore200e->host_bsq[ scheme ][ magn ];
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&bsq->status,
sizeof(enum status),
QUEUE_SIZE_BS,
@@ -2165,13 +2096,13 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e)
}
/* allocate and align the array of receive buffer descriptors */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&bsq->rbd_block,
sizeof(struct rbd_block),
QUEUE_SIZE_BS,
fore200e->bus->descr_alignment) < 0) {
- fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
+ fore200e_dma_chunk_free(fore200e, &bsq->status);
return -ENOMEM;
}
@@ -2212,7 +2143,7 @@ static int fore200e_init_rx_queue(struct fore200e *fore200e)
DPRINTK(2, "receive queue is being initialized\n");
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&rxq->status,
sizeof(enum status),
QUEUE_SIZE_RX,
@@ -2221,13 +2152,13 @@ static int fore200e_init_rx_queue(struct fore200e *fore200e)
}
/* allocate and align the array of receive PDU descriptors */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&rxq->rpd,
sizeof(struct rpd),
QUEUE_SIZE_RX,
fore200e->bus->descr_alignment) < 0) {
- fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
+ fore200e_dma_chunk_free(fore200e, &rxq->status);
return -ENOMEM;
}
@@ -2271,7 +2202,7 @@ static int fore200e_init_tx_queue(struct fore200e *fore200e)
DPRINTK(2, "transmit queue is being initialized\n");
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&txq->status,
sizeof(enum status),
QUEUE_SIZE_TX,
@@ -2280,13 +2211,13 @@ static int fore200e_init_tx_queue(struct fore200e *fore200e)
}
/* allocate and align the array of transmit PDU descriptors */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&txq->tpd,
sizeof(struct tpd),
QUEUE_SIZE_TX,
fore200e->bus->descr_alignment) < 0) {
- fore200e->bus->dma_chunk_free(fore200e, &txq->status);
+ fore200e_dma_chunk_free(fore200e, &txq->status);
return -ENOMEM;
}
@@ -2333,7 +2264,7 @@ static int fore200e_init_cmd_queue(struct fore200e *fore200e)
DPRINTK(2, "command queue is being initialized\n");
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&cmdq->status,
sizeof(enum status),
QUEUE_SIZE_CMD,
@@ -2487,25 +2418,15 @@ static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
static int fore200e_load_and_start_fw(struct fore200e *fore200e)
{
const struct firmware *firmware;
- struct device *device;
const struct fw_header *fw_header;
const __le32 *fw_data;
u32 fw_size;
u32 __iomem *load_addr;
char buf[48];
- int err = -ENODEV;
-
- if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
- device = &((struct pci_dev *) fore200e->bus_dev)->dev;
-#ifdef CONFIG_SBUS
- else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
- device = &((struct platform_device *) fore200e->bus_dev)->dev;
-#endif
- else
- return err;
+ int err;
sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
- if ((err = request_firmware(&firmware, buf, device)) < 0) {
+ if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
return err;
}
@@ -2631,7 +2552,6 @@ static const struct of_device_id fore200e_sba_match[];
static int fore200e_sba_probe(struct platform_device *op)
{
const struct of_device_id *match;
- const struct fore200e_bus *bus;
struct fore200e *fore200e;
static int index = 0;
int err;
@@ -2639,18 +2559,17 @@ static int fore200e_sba_probe(struct platform_device *op)
match = of_match_device(fore200e_sba_match, &op->dev);
if (!match)
return -EINVAL;
- bus = match->data;
fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
if (!fore200e)
return -ENOMEM;
- fore200e->bus = bus;
- fore200e->bus_dev = op;
+ fore200e->bus = &fore200e_sbus_ops;
+ fore200e->dev = &op->dev;
fore200e->irq = op->archdata.irqs[0];
fore200e->phys_base = op->resource[0].start;
- sprintf(fore200e->name, "%s-%d", bus->model_name, index);
+ sprintf(fore200e->name, "SBA-200E-%d", index);
err = fore200e_init(fore200e, &op->dev);
if (err < 0) {
@@ -2678,7 +2597,6 @@ static int fore200e_sba_remove(struct platform_device *op)
static const struct of_device_id fore200e_sba_match[] = {
{
.name = SBA200E_PROM_NAME,
- .data = (void *) &fore200e_bus[1],
},
{},
};
@@ -2698,7 +2616,6 @@ static struct platform_driver fore200e_sba_driver = {
static int fore200e_pca_detect(struct pci_dev *pci_dev,
const struct pci_device_id *pci_ent)
{
- const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
struct fore200e* fore200e;
int err = 0;
static int index = 0;
@@ -2719,20 +2636,19 @@ static int fore200e_pca_detect(struct pci_dev *pci_dev,
goto out_disable;
}
- fore200e->bus = bus;
- fore200e->bus_dev = pci_dev;
+ fore200e->bus = &fore200e_pci_ops;
+ fore200e->dev = &pci_dev->dev;
fore200e->irq = pci_dev->irq;
fore200e->phys_base = pci_resource_start(pci_dev, 0);
- sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
+ sprintf(fore200e->name, "PCA-200E-%d", index - 1);
pci_set_master(pci_dev);
- printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
- fore200e->bus->model_name,
+ printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
- sprintf(fore200e->name, "%s-%d", bus->model_name, index);
+ sprintf(fore200e->name, "PCA-200E-%d", index);
err = fore200e_init(fore200e, &pci_dev->dev);
if (err < 0) {
@@ -2767,8 +2683,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
static const struct pci_device_id fore200e_pca_tbl[] = {
- { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, (unsigned long) &fore200e_bus[0] },
+ { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
@@ -3108,8 +3023,7 @@ module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);
-static const struct atmdev_ops fore200e_ops =
-{
+static const struct atmdev_ops fore200e_ops = {
.open = fore200e_open,
.close = fore200e_close,
.ioctl = fore200e_ioctl,
@@ -3121,53 +3035,6 @@ static const struct atmdev_ops fore200e_ops =
.owner = THIS_MODULE
};
-
-static const struct fore200e_bus fore200e_bus[] = {
-#ifdef CONFIG_PCI
- { "PCA-200E", "pca200e", 32, 4, 32,
- fore200e_pca_read,
- fore200e_pca_write,
- fore200e_pca_dma_map,
- fore200e_pca_dma_unmap,
- fore200e_pca_dma_sync_for_cpu,
- fore200e_pca_dma_sync_for_device,
- fore200e_pca_dma_chunk_alloc,
- fore200e_pca_dma_chunk_free,
- fore200e_pca_configure,
- fore200e_pca_map,
- fore200e_pca_reset,
- fore200e_pca_prom_read,
- fore200e_pca_unmap,
- NULL,
- fore200e_pca_irq_check,
- fore200e_pca_irq_ack,
- fore200e_pca_proc_read,
- },
-#endif
-#ifdef CONFIG_SBUS
- { "SBA-200E", "sba200e", 32, 64, 32,
- fore200e_sba_read,
- fore200e_sba_write,
- fore200e_sba_dma_map,
- fore200e_sba_dma_unmap,
- fore200e_sba_dma_sync_for_cpu,
- fore200e_sba_dma_sync_for_device,
- fore200e_sba_dma_chunk_alloc,
- fore200e_sba_dma_chunk_free,
- fore200e_sba_configure,
- fore200e_sba_map,
- fore200e_sba_reset,
- fore200e_sba_prom_read,
- fore200e_sba_unmap,
- fore200e_sba_irq_enable,
- fore200e_sba_irq_check,
- fore200e_sba_irq_ack,
- fore200e_sba_proc_read,
- },
-#endif
- {}
-};
-
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
#ifdef __LITTLE_ENDIAN__
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index c8a02c8fba15..caf0ea6a328a 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -805,12 +805,6 @@ typedef struct fore200e_bus {
int status_alignment; /* status words DMA alignment requirement */
u32 (*read)(volatile u32 __iomem *);
void (*write)(u32, volatile u32 __iomem *);
- u32 (*dma_map)(struct fore200e*, void*, int, int);
- void (*dma_unmap)(struct fore200e*, u32, int, int);
- void (*dma_sync_for_cpu)(struct fore200e*, u32, int, int);
- void (*dma_sync_for_device)(struct fore200e*, u32, int, int);
- int (*dma_chunk_alloc)(struct fore200e*, struct chunk*, int, int, int);
- void (*dma_chunk_free)(struct fore200e*, struct chunk*);
int (*configure)(struct fore200e*);
int (*map)(struct fore200e*);
void (*reset)(struct fore200e*);
@@ -844,7 +838,7 @@ typedef struct fore200e {
enum fore200e_state state; /* device state */
char name[16]; /* device name */
- void* bus_dev; /* bus-specific kernel data */
+ struct device *dev;
int irq; /* irq number */
unsigned long phys_base; /* physical base address */
void __iomem * virt_base; /* virtual base address */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index b3c0498ee433..8e9213b36e31 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
}
tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
- if (tmp && !(opt_flags & FW_OPT_NOCACHE))
- list_add(&tmp->list, &fwc->head);
+ if (tmp) {
+ INIT_LIST_HEAD(&tmp->list);
+ if (!(opt_flags & FW_OPT_NOCACHE))
+ list_add(&tmp->list, &fwc->head);
+ }
spin_unlock(&fwc->lock);
*fw_priv = tmp;
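Initializing tmp->list even when the entry is not cached means a later list_del() operates on a valid self-pointing node rather than on uninitialized memory. A sketch of the invariant (struct name illustrative):

#include <linux/list.h>
#include <linux/types.h>

struct fw_entry {
	struct list_head list;
};

static void track_entry(struct fw_entry *e, struct list_head *cache,
			bool do_cache)
{
	INIT_LIST_HEAD(&e->list);	/* list_del(&e->list) is now always safe */
	if (do_cache)
		list_add(&e->list, cache);
}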
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index d98ed0442201..9f1392fc7105 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1356,7 +1356,7 @@ static int qca_init_regulators(struct qca_power *qca,
{
int i;
- qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
+ qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
if (!qca->vreg_bulk)
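This hunk, like several below, swaps devm_kzalloc(n * size) for devm_kcalloc(n, size), which checks the multiplication for overflow and returns NULL instead of silently allocating a short buffer. A one-function sketch:

#include <linux/device.h>
#include <linux/slab.h>

static void *alloc_array(struct device *dev, size_t n, size_t elem_size)
{
	/* overflow-checked: returns NULL if n * elem_size would wrap */
	return devm_kcalloc(dev, n, elem_size, GFP_KERNEL);
}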
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 7e71043457a6..86c699c14f84 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv);
- priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+ priv->ring = devm_kcalloc(dev, priv->config.rings,
+ sizeof(*priv->ring),
GFP_KERNEL);
if (!priv->ring) {
ret = -ENOMEM;
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
- priv->ring[i].rdr_req = devm_kzalloc(dev,
- sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ priv->ring[i].rdr_req = devm_kcalloc(dev,
+ EIP197_DEFAULT_RING_SIZE,
+ sizeof(priv->ring[i].rdr_req),
GFP_KERNEL);
if (!priv->ring[i].rdr_req) {
ret = -ENOMEM;
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 0b7e19c27c6d..51a5ac2293a7 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -14,6 +14,7 @@
*/
#include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-region.h>
#include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ eprobe_mgr_put:
static int fme_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+ struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
- fpga_mgr_put(region->mgr);
+ fpga_mgr_put(mgr);
return 0;
}
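fpga_region_unregister() may drop the last reference to the region, so region->mgr has to be copied into a local before the call; the of-fpga-region hunk below applies the identical fix. A generic sketch of the save-before-free pattern (all names illustrative):

struct region;
struct manager;

void unregister_region(struct region *region);	/* may free 'region' */
void put_manager(struct manager *mgr);
struct manager *region_mgr(const struct region *region);

static void teardown(struct region *region)
{
	struct manager *mgr = region_mgr(region);	/* copy out first */

	unregister_region(region);	/* 'region' may be gone after this */
	put_manager(mgr);		/* safe: local copy survives */
}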
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 24b8f98b73ec..c983dac97501 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
*
* Given a device, get an exclusive reference to a fpga bridge.
*
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
*/
struct fpga_bridge *fpga_bridge_get(struct device *dev,
struct fpga_image_info *info)
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 35fabb8083fb..052a1342ab7e 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -437,9 +437,10 @@ eprobe_mgr_put:
static int of_fpga_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = platform_get_drvdata(pdev);
+ struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
- fpga_mgr_put(region->mgr);
+ fpga_mgr_put(mgr);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a57300c1d649..25187403e3ac 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1682,7 +1682,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip);
- gpiochip->irq.parents = &parent_irq;
+ gpiochip->irq.parent_irq = parent_irq;
+ gpiochip->irq.parents = &gpiochip->irq.parent_irq;
gpiochip->irq.num_parents = 1;
}
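The gpiolib fix cures a dangling pointer: irq.parents used to point at the caller's stack variable, which dies on return. Copying the value into the long-lived chip and pointing at that copy fixes it; a reduced sketch (struct shape illustrative):

struct chip_irq {
	unsigned int parent_irq;	/* owned storage for the value */
	unsigned int *parents;
	unsigned int num_parents;
};

static void set_cascaded_parent(struct chip_irq *ci, unsigned int parent_irq)
{
	ci->parent_irq = parent_irq;	/* persist the value... */
	ci->parents = &ci->parent_irq;	/* ...and point at our own copy */
	ci->num_parents = 1;
}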
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 0b976dfd04df..92ecb9bf982c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
- mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+ mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
sizeof(struct drm_plane),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index 790d39f816dc..b557687b1964 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev,
return 0;
}
- mp->clk_config = devm_kzalloc(&pdev->dev,
- sizeof(struct dss_clk) * num_clk,
+ mp->clk_config = devm_kcalloc(&pdev->dev,
+ num_clk, sizeof(struct dss_clk),
GFP_KERNEL);
if (!mp->clk_config)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 5691dfa1db6f..041e7daf8a33 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -900,9 +900,22 @@ static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
+ enum drm_connector_status conn_status;
+ int ret;
+
if (!mstc->port)
return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return connector_status_disconnected;
+
+ conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+ mstc->port);
+
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return conn_status;
}
static void
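The nouveau hunk wraps the MST detect path in a runtime-PM reference so the GPU is powered while the port is queried. A sketch of the bracket; the put on the error path is my addition, on the assumption that pm_runtime_get_sync() takes its reference even on failure:

#include <linux/pm_runtime.h>

static int with_device_awake(struct device *dev, int (*work)(struct device *))
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0 && ret != -EACCES) {	/* -EACCES: runtime PM disabled */
		pm_runtime_put_autosuspend(dev);	/* get_sync still took a ref */
		return ret;
	}

	ret = work(dev);

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);
	return ret;
}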
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ced041899456..f4d08c8ac7f8 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
__u32 version)
{
int ret = 0;
+ unsigned int cur_cpu;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* the CPU attempting to connect may not be CPU 0.
*/
if (version >= VERSION_WIN8_1) {
- msg->target_vcpu =
- hv_cpu_number_to_vp_number(smp_processor_id());
- vmbus_connection.connect_cpu = smp_processor_id();
+ cur_cpu = get_cpu();
+ msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+ vmbus_connection.connect_cpu = cur_cpu;
+ put_cpu();
} else {
msg->target_vcpu = 0;
vmbus_connection.connect_cpu = 0;
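smp_processor_id() is only stable while preemption is off, and the old code read it twice, so the two uses could even observe different CPUs. get_cpu()/put_cpu() pin the task for the duration; a minimal sketch:

#include <linux/smp.h>

static unsigned int stable_cpu_sample(void)
{
	unsigned int cpu = get_cpu();	/* disables preemption */

	/* 'cpu' cannot change between here and put_cpu() */
	put_cpu();
	return cpu;
}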
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 8474d601aa63..b998f9fbed41 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
if (fan_cnt < 1)
return -EINVAL;
- fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+ fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
if (!fan_ch)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 94d94b4a9a0d..18cc324f3ca9 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- u32 ic_clk = i2c_dw_clk_rate(dev);
const char *mode_str, *fp_str = "";
u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings;
+ u32 ic_clk;
int ret;
ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
/* Calculate SCL timing parameters for standard mode if not set */
if (!dev->ss_hcnt || !dev->ss_lcnt) {
+ ic_clk = i2c_dw_clk_rate(dev);
dev->ss_hcnt =
i2c_dw_scl_hcnt(ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
* needed also in high speed mode.
*/
if (!dev->fs_hcnt || !dev->fs_lcnt) {
+ ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
i2c_dw_scl_hcnt(ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 0cf1379f4e80..5c754bf659e2 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
* run ~75 kHz instead which should do no harm.
*/
dev_notice(&sch_adapter.dev,
- "Clock divider unitialized. Setting defaults\n");
+ "Clock divider uninitialized. Setting defaults\n");
outw(backbone_speed / (4 * 100), SMBHSTCLK);
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 36732eb688a4..9f2eb02481d3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t rx_dma;
enum geni_se_xfer_mode mode;
unsigned long time_left = XFER_TIMEOUT;
+ void *dma_buf;
gi2c->cur = msg;
- mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ mode = GENI_SE_FIFO;
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+ mode = GENI_SE_DMA;
+
geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
if (mode == GENI_SE_DMA) {
int ret;
- ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
&rx_dma);
if (ret) {
mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
}
}
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err)
geni_i2c_rx_fsm_rst(gi2c);
geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
}
return gi2c->err;
}
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t tx_dma;
enum geni_se_xfer_mode mode;
unsigned long time_left;
+ void *dma_buf;
gi2c->cur = msg;
- mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ mode = GENI_SE_FIFO;
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+ mode = GENI_SE_DMA;
+
geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
if (mode == GENI_SE_DMA) {
int ret;
- ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
&tx_dma);
if (ret) {
mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
}
}
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err)
geni_i2c_tx_fsm_rst(gi2c);
geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
}
return gi2c->err;
}
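Both GENI transfer paths now route through i2c_get_dma_safe_msg_buf(), which returns NULL below the length threshold (keep FIFO mode) or a DMA-safe buffer otherwise, and i2c_put_dma_safe_msg_buf(), whose final argument controls whether read data is copied back. A sketch of the lifecycle:

#include <linux/i2c.h>

static int do_xfer(struct i2c_msg *msg)
{
	u8 *buf = i2c_get_dma_safe_msg_buf(msg, 32);	/* NULL => stay on FIFO */
	int err = 0;

	/* ... program the controller using buf (DMA) or msg->buf (FIFO) ... */

	i2c_put_dma_safe_msg_buf(buf, msg, !err);	/* copies reads back on success */
	return err;
}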
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index a01389b85f13..7e9a2bbf5ddc 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER;
+ mt_params[4].buffer.length = len;
mt_params[4].buffer.pointer = data->block + 1;
}
break;
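The one-line i2c-scmi fix matters because an ACPI_TYPE_BUFFER argument carries an explicit length; leaving buffer.length unset passes ACPICA a stale value alongside the block data. A sketch of filling such an argument:

#include <linux/acpi.h>

static void fill_block_arg(union acpi_object *obj, u8 *data, u32 len)
{
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = len;	/* must accompany the pointer */
	obj->buffer.pointer = data;
}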
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6ee03d6089eb..8ec7418e99f0 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2621,3 +2621,49 @@ void ib_drain_qp(struct ib_qp *qp)
ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
+
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *))
+{
+ struct rdma_netdev_alloc_params params;
+ struct net_device *netdev;
+ int rc;
+
+ if (!device->rdma_netdev_get_params)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ rc = device->rdma_netdev_get_params(device, port_num, type, &params);
+ if (rc)
+ return ERR_PTR(rc);
+
+ netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
+ setup, params.txqs, params.rxqs);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+ return netdev;
+}
+EXPORT_SYMBOL(rdma_alloc_netdev);
+
+int rdma_init_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *),
+ struct net_device *netdev)
+{
+ struct rdma_netdev_alloc_params params;
+ int rc;
+
+ if (!device->rdma_netdev_get_params)
+ return -EOPNOTSUPP;
+
+ rc = device->rdma_netdev_get_params(device, port_num, type, &params);
+ if (rc)
+ return rc;
+
+ return params.initialize_rdma_netdev(device, port_num,
+ netdev, params.param);
+}
+EXPORT_SYMBOL(rdma_init_netdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c414f3809e5c..5d9b7f62a0ba 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -5163,22 +5163,14 @@ done:
return num_counters;
}
-static struct net_device*
-mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
- u8 port_num,
- enum rdma_netdev_t type,
- const char *name,
- unsigned char name_assign_type,
- void (*setup)(struct net_device *))
+static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
{
- struct net_device *netdev;
-
if (type != RDMA_NETDEV_IPOIB)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
- netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
- name, setup);
- return netdev;
+ return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
}
static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
@@ -5824,8 +5816,9 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
- if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
- dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+ IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
+ dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params;
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9fb1d9cb9401..e22314837645 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int shrink = 0;
int c;
+ if (!mr->allocated_from_cache)
+ return;
+
c = order2idx(dev, mr->order);
if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
umem = NULL;
}
#endif
-
clean_mr(dev, mr);
+ /*
+ * We should unregister the DMA address from the HCA before
+ * removing the DMA mapping.
+ */
+ mlx5_mr_cache_free(dev, mr);
if (umem) {
ib_umem_release(umem);
atomic_sub(npages, &dev->mdev->priv.reg_pages);
}
-
if (!mr->allocated_from_cache)
kfree(mr);
- else
- mlx5_mr_cache_free(dev, mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1abe3c62f106..1da119d901a9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -499,8 +499,10 @@ void ipoib_reap_ah(struct work_struct *work);
struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
-struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
- const char *format);
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+ const char *format);
+int ipoib_intf_init(struct ib_device *hca, u8 port, const char *format,
+ struct net_device *dev);
void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
@@ -531,6 +533,8 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req);
+struct rtnl_link_ops *ipoib_get_link_ops(void);
+
static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e3d28f9ad9c0..8baa75a705c5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2115,82 +2115,58 @@ static const struct net_device_ops ipoib_netdev_default_pf = {
.ndo_stop = ipoib_ib_dev_stop_default,
};
-static struct net_device
-*ipoib_create_netdev_default(struct ib_device *hca,
- const char *name,
- unsigned char name_assign_type,
- void (*setup)(struct net_device *))
+static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
+ const char *name)
{
struct net_device *dev;
- struct rdma_netdev *rn;
- dev = alloc_netdev((int)sizeof(struct rdma_netdev),
- name,
- name_assign_type, setup);
- if (!dev)
- return NULL;
-
- rn = netdev_priv(dev);
-
- rn->send = ipoib_send;
- rn->attach_mcast = ipoib_mcast_attach;
- rn->detach_mcast = ipoib_mcast_detach;
- rn->hca = hca;
- dev->netdev_ops = &ipoib_netdev_default_pf;
-
- return dev;
-}
-
-static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
- const char *name)
-{
- struct net_device *dev;
-
- if (hca->alloc_rdma_netdev) {
- dev = hca->alloc_rdma_netdev(hca, port,
- RDMA_NETDEV_IPOIB, name,
- NET_NAME_UNKNOWN,
- ipoib_setup_common);
- if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
- return NULL;
- }
-
- if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
- dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
- ipoib_setup_common);
+ dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common);
+ if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
+ return dev;
+ dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
+ ipoib_setup_common);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
return dev;
}
-struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
- const char *name)
+int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
+ struct net_device *dev)
{
- struct net_device *dev;
+ struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_dev_priv *priv;
- struct rdma_netdev *rn;
+ int rc;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
- return NULL;
+ return -ENOMEM;
priv->ca = hca;
priv->port = port;
- dev = ipoib_get_netdev(hca, port, name);
- if (!dev)
- goto free_priv;
+ rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common, dev);
+ if (rc) {
+ if (rc != -EOPNOTSUPP)
+ goto out;
+
+ dev->netdev_ops = &ipoib_netdev_default_pf;
+ rn->send = ipoib_send;
+ rn->attach_mcast = ipoib_mcast_attach;
+ rn->detach_mcast = ipoib_mcast_detach;
+ rn->hca = hca;
+ }
priv->rn_ops = dev->netdev_ops;
- /* fixme : should be after the query_cap */
- if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
+ if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION)
dev->netdev_ops = &ipoib_netdev_ops_vf;
else
dev->netdev_ops = &ipoib_netdev_ops_pf;
- rn = netdev_priv(dev);
rn->clnt_priv = priv;
-
/*
* Only the child register_netdev flows can handle priv_destructor
* being set, so we force it to NULL here and handle manually until it
@@ -2201,10 +2177,35 @@ struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
ipoib_build_priv(dev);
- return priv;
-free_priv:
+ return 0;
+
+out:
kfree(priv);
- return NULL;
+ return rc;
+}
+
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+ const char *name)
+{
+ struct net_device *dev;
+ int rc;
+
+ dev = ipoib_alloc_netdev(hca, port, name);
+ if (IS_ERR(dev))
+ return dev;
+
+ rc = ipoib_intf_init(hca, port, name, dev);
+ if (rc) {
+ free_netdev(dev);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * Upon success the caller must ensure ipoib_intf_free is called or
+ * register_netdevice succeeded and priv_destructor is set to
+ * ipoib_intf_free.
+ */
+ return dev;
}
void ipoib_intf_free(struct net_device *dev)
@@ -2387,16 +2388,19 @@ int ipoib_add_pkey_attr(struct net_device *dev)
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u8 port)
{
+ struct rtnl_link_ops *ops = ipoib_get_link_ops();
+ struct rdma_netdev_alloc_params params;
struct ipoib_dev_priv *priv;
struct net_device *ndev;
int result;
- priv = ipoib_intf_alloc(hca, port, format);
- if (!priv) {
- pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
- return ERR_PTR(-ENOMEM);
+ ndev = ipoib_intf_alloc(hca, port, format);
+ if (IS_ERR(ndev)) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
+ PTR_ERR(ndev));
+ return ndev;
}
- ndev = priv->dev;
+ priv = ipoib_priv(ndev);
INIT_IB_EVENT_HANDLER(&priv->event_handler,
priv->ca, ipoib_event);
@@ -2417,6 +2421,14 @@ static struct net_device *ipoib_add_port(const char *format,
return ERR_PTR(result);
}
+ if (hca->rdma_netdev_get_params) {
+ int rc = hca->rdma_netdev_get_params(hca, port,
+ RDMA_NETDEV_IPOIB,
+ &params);
+
+ if (!rc && ops->priv_size < params.sizeof_priv)
+ ops->priv_size = params.sizeof_priv;
+ }
/*
* We cannot set priv_destructor before register_netdev because we
* need priv to be always valid during the error flow to execute
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index d4d553a51fa9..38c984d16996 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -122,12 +122,26 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+ err = ipoib_intf_init(ppriv->ca, ppriv->port, dev->name, dev);
+ if (err) {
+ ipoib_warn(ppriv, "failed to initialize pkey device\n");
+ return err;
+ }
+
err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
child_pkey, IPOIB_RTNL_CHILD);
+ if (err)
+ return err;
- if (!err && data)
+ if (data) {
err = ipoib_changelink(dev, tb, data, extack);
- return err;
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
+ }
+
+ return 0;
}
static size_t ipoib_get_size(const struct net_device *dev)
@@ -149,6 +163,11 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.fill_info = ipoib_fill_info,
};
+struct rtnl_link_ops *ipoib_get_link_ops(void)
+{
+ return &ipoib_link_ops;
+}
+
int __init ipoib_netlink_init(void)
{
return rtnl_link_register(&ipoib_link_ops);
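/*
 * Editor's note: the ipoib_new_child_link() hunk above follows a general
 * rollback rule, sketched here with hypothetical helpers: once a resource
 * has been published (register_netdevice above, register_thing below), a
 * failure in any later step must unpublish it before returning, or the
 * half-configured object leaks.
 */
#include <stdio.h>

static int register_thing(void) { return 0; }
static void unregister_thing(void) { puts("rolled back"); }
static int configure_thing(int fail) { return fail ? -1 : 0; }

static int create_thing(int fail_cfg)
{
	int err = register_thing();

	if (err)
		return err;

	err = configure_thing(fail_cfg);
	if (err) {
		unregister_thing();	/* undo the publish step */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("create: %d\n", create_thing(1));
	return 0;
}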
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 341753fbda54..8ac8e18fbe0c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -85,7 +85,7 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
/*
* NOTE: If this function fails then the priv->dev will remain valid, however
- * priv can have been freed and must not be touched by caller in the error
+ * priv will have been freed and must not be touched by caller in the error
* case.
*
* If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
@@ -101,6 +101,12 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
ASSERT_RTNL();
/*
+ * We do not need to touch priv if register_netdevice fails, so just
+ * always use this flow.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ /*
* Racing with unregister of the parent must be prevented by the
* caller.
*/
@@ -120,9 +126,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto out_early;
}
- /* We do not need to touch priv if register_netdevice fails */
- ndev->priv_destructor = ipoib_intf_free;
-
result = register_netdevice(ndev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
@@ -182,12 +185,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
snprintf(intf_name, sizeof(intf_name), "%s.%04x",
ppriv->dev->name, pkey);
- priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
- if (!priv) {
- result = -ENOMEM;
+ ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (IS_ERR(ndev)) {
+ result = PTR_ERR(ndev);
goto out;
}
- ndev = priv->dev;
+ priv = ipoib_priv(ndev);
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
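/*
 * Editor's note: the ipoib_vlan.c hunks above move the priv_destructor
 * assignment ahead of every failure point, so all exit paths free priv the
 * same way. A sketch of the convention (hypothetical types): install the
 * object's destructor before anything can fail, then let the failing layer
 * invoke it.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj { void (*destructor)(struct obj *); };

static void obj_free(struct obj *o)
{
	free(o);
	puts("freed by destructor");
}

static int obj_register(struct obj *o, int fail)
{
	if (fail) {			/* the core calls the destructor */
		o->destructor(o);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->destructor = obj_free;	/* set before registration can fail */
	printf("register: %d\n", obj_register(o, 1));
	return 0;
}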
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 370206f987f9..f48369d6f3a0 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -564,6 +564,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
+ cond_resched();
}
out:
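/*
 * Editor's note: this hunk and the uinput/mousedev hunks below all add
 * cond_resched() inside a loop that injects one event per iteration. A
 * userspace sketch of the idea, with sched_yield() standing in for
 * cond_resched() (which has no userspace equivalent): without a voluntary
 * scheduling point, a single write() carrying many events could monopolize
 * the CPU in kernel context and trip softlockup warnings on
 * non-preemptible kernels.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 1000000; i++) {
		/* ... process one injected event ... */
		if ((i & 0xfff) == 0)
			sched_yield();	/* voluntary scheduling point */
	}
	puts("done");
	return 0;
}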
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cd620e009bad..d4b9db487b16 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -231,6 +231,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -530,6 +531,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index eb14ddf69346..8ec483e8688b 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -598,6 +598,7 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size();
+ cond_resched();
}
return bytes;
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index e08228061bcd..412fa71245af 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -707,6 +707,7 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer,
mousedev_generate_response(client, c);
spin_unlock_irq(&client->packet_lock);
+ cond_resched();
}
kill_fasync(&client->fasync, SIGIO, POLL_IN);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index b8bc71569349..95a78ccbd847 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void)
for (i = 0; i < I8042_NUM_PORTS; i++) {
struct serio *serio = i8042_ports[i].serio;
- if (serio) {
- printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
- serio->name,
- (unsigned long) I8042_DATA_REG,
- (unsigned long) I8042_COMMAND_REG,
- i8042_ports[i].irq);
- serio_register_port(serio);
- device_set_wakeup_capable(&serio->dev, true);
- }
+ if (!serio)
+ continue;
+
+ printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
+ serio->name,
+ (unsigned long) I8042_DATA_REG,
+ (unsigned long) I8042_COMMAND_REG,
+ i8042_ports[i].irq);
+ serio_register_port(serio);
+ device_set_wakeup_capable(&serio->dev, true);
+
+ /*
+ * On platforms using suspend-to-idle, allow the keyboard to
+ * wake up the system from sleep by enabling keyboard wakeups
+ * by default. This is consistent with keyboard wakeup
+ * behavior on many platforms using suspend-to-RAM (ACPI S3)
+ * by default.
+ */
+ if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
+ device_set_wakeup_enable(&serio->dev, true);
}
}
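/*
 * Editor's note: a compact restatement of the policy added above, with
 * hypothetical types. Wakeup capability is declared for every i8042 port,
 * but wakeup is enabled by default only for the keyboard port and only
 * when the platform suspends via s2idle, mirroring what ACPI S3 firmware
 * typically does for keyboards on its own.
 */
#include <stdbool.h>
#include <stdio.h>

enum { KBD_PORT = 0, AUX_PORT = 1 };

static bool default_wakeup_enabled(int port, bool via_s2idle)
{
	return via_s2idle && port == KBD_PORT;
}

int main(void)
{
	printf("kbd on s2idle: %d\n", default_wakeup_enabled(KBD_PORT, true));
	printf("aux on s2idle: %d\n", default_wakeup_enabled(AUX_PORT, true));
	return 0;
}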
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 77debda2221b..6c336366128c 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -625,7 +625,7 @@ Amd7930_l1hw(struct PStack *st, int pr, void *arg)
break;
case (HW_RESET | REQUEST):
spin_lock_irqsave(&cs->lock, flags);
- if ((cs->dc.amd7930.ph_state == 8)) {
+ if (cs->dc.amd7930.ph_state == 8) {
/* b-channels off, PH-AR cleared
* change to F3 */
Amd7930_ph_command(cs, 0x20, "HW_RESET REQUEST"); //LMR1 bit 5
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index e13d991e9fb5..b29a8327eed1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3484,14 +3484,13 @@ static int __init dm_cache_init(void)
int r;
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
- if (!migration_cache) {
- dm_unregister_target(&cache_target);
+ if (!migration_cache)
return -ENOMEM;
- }
r = dm_register_target(&cache_target);
if (r) {
DMERR("cache target registration failed: %d", r);
+ kmem_cache_destroy(migration_cache);
return r;
}
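/*
 * Editor's note: the dm-cache fix above corrects an init-order bug; the
 * old code unregistered a target that was never registered and leaked the
 * kmem cache when registration failed. The rule, sketched with
 * hypothetical helpers: on error, tear down exactly the steps that have
 * succeeded so far, in reverse order.
 */
#include <stdio.h>

static int alloc_cache(void) { return 0; }
static void free_cache(void) { puts("cache freed"); }
static int register_target(int fail) { return fail ? -1 : 0; }

static int module_init_sketch(int fail_reg)
{
	int r = alloc_cache();

	if (r)
		return r;

	r = register_target(fail_reg);
	if (r) {
		free_cache();	/* undo only the step that succeeded */
		return r;
	}
	return 0;
}

int main(void)
{
	printf("init: %d\n", module_init_sketch(1));
	return 0;
}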
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 21d126a5078c..32aabe27b37c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -467,7 +467,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
.version = {1, 5, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
+#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 89ccb64342de..e1fa6baf4e8e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3462,7 +3462,8 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL);
+ ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
+ ic->tag_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index d10964d41fd7..2f7c44a006c4 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+#ifdef CONFIG_BLK_DEV_ZONED
static int linear_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
@@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
}
+#endif
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
+ .end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+#else
+ .features = DM_TARGET_PASSES_INTEGRITY,
+#endif
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
.map = linear_map,
- .end_io = linear_end_io,
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
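/*
 * Editor's note: the dm-linear and dm-flakey hunks above gate the zoned
 * feature flag (and, for dm-linear, the end_io hook it needs) behind
 * CONFIG_BLK_DEV_ZONED, so neither exists on kernels built without zoned
 * block support. A userspace sketch of the same compile-time shaping,
 * with made-up flag values:
 */
#include <stdio.h>

#define FEAT_INTEGRITY	0x1
#define FEAT_ZONED_HM	0x2

/* #define CONFIG_BLK_DEV_ZONED 1 */	/* toggle to compare builds */

struct target { unsigned int features; };

static const struct target linear = {
#ifdef CONFIG_BLK_DEV_ZONED
	.features = FEAT_INTEGRITY | FEAT_ZONED_HM,
#else
	.features = FEAT_INTEGRITY,
#endif
};

int main(void)
{
	printf("features: 0x%x\n", linear.features);
	return 0;
}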
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 20f7e4ef5342..45abb54037fc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the target device. The zone descriptors
- * must be remapped to match their position within the dm device.
- * A target may call dm_remap_zone_report after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
- * from the target device mapping to the dm device.
+ * The zone descriptors obtained with a zone report indicate zone positions
+ * within the target backing device, regardless of whether that device is a
+ * partition and regardless of the target mapping start sector on the device
+ * or partition. Each zone descriptor's start sector and write pointer
+ * position must be adjusted to match their relative position within the dm
+ * device.
+ * A target may call dm_remap_zone_report() after completion of a
+ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
+ * backing device.
*/
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
@@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
struct blk_zone *zone;
unsigned int nr_rep = 0;
unsigned int ofst;
+ sector_t part_offset;
struct bio_vec bvec;
struct bvec_iter iter;
void *addr;
@@ -1179,6 +1182,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
return;
/*
+	 * The bio sector was incremented by the request size on completion.
+	 * From the original request sector, the target start offset on the
+	 * backing device and the target mapping offset (ti->begin), the
+	 * partition offset of the backing device can be determined. The
+	 * partition offset is always 0 if the target uses a whole device.
+ */
+ part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+
+ /*
* Remap the start sector of the reported zones. For sequential zones,
* also remap the write pointer position.
*/
@@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
/* Set zones start sector */
while (hdr->nr_zones && ofst < bvec.bv_len) {
zone = addr + ofst;
+ zone->start -= part_offset;
if (zone->start >= start + ti->len) {
hdr->nr_zones = 0;
break;
@@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->wp = zone->start;
else
- zone->wp = zone->wp + ti->begin - start;
+ zone->wp = zone->wp + ti->begin - start - part_offset;
}
ofst += sizeof(struct blk_zone);
hdr->nr_zones--;
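/*
 * Editor's note: a worked example of the remapping arithmetic above, with
 * made-up sector numbers. Suppose the target maps dm sector ti->begin = 0
 * to sector start = 1000 of a partition that itself begins part_offset =
 * 2048 sectors into the whole disk. The device reports zone->start
 * relative to the whole disk, so both offsets must be subtracted to get
 * the dm-relative position.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long part_offset = 2048;	/* partition start on disk */
	unsigned long long start = 1000;	/* target start on partition */
	unsigned long long ti_begin = 0;	/* mapping start in dm device */
	unsigned long long zone_start = 2048 + 1000 + 256; /* disk-relative */

	zone_start -= part_offset;		    /* partition-relative */
	zone_start = zone_start + ti_begin - start; /* dm-relative */
	printf("dm-relative zone start: %llu\n", zone_start);	/* 256 */
	return 0;
}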
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index a0b9102c4c6e..e201ccb3fda4 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1371,6 +1371,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
if (brq->data.blocks > 1) {
/*
+ * Some SD cards in SPI mode return a CRC error or even lock up
+ * completely when trying to read the last block using a
+ * multiblock read command.
+ */
+ if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
+ (blk_rq_pos(req) + blk_rq_sectors(req) ==
+ get_capacity(md->disk)))
+ brq->data.blocks--;
+
+ /*
* After a read error, we redo the request one sector
* at a time in order to accurately determine which
* sectors can be read successfully.
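/*
 * Editor's note: a worked check of the SPI multiblock workaround above.
 * For a card of 1000 sectors, a multiblock READ of 4 sectors starting at
 * sector 996 ends exactly at the capacity boundary, so the request is
 * trimmed by one block; the remaining sector is then issued as a
 * follow-up request that takes the single-block path.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 1000, pos = 996, sectors = 4;
	bool is_spi_read = true;
	unsigned int blocks = (unsigned int)sectors;

	if (is_spi_read && blocks > 1 && pos + sectors == capacity)
		blocks--;	/* avoid reading the very last block */
	printf("blocks issued: %u\n", blocks);	/* 3 */
	return 0;
}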
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
index 0f7cf54e3234..89096f10f4c4 100644
--- a/drivers/mux/adgs1408.c
+++ b/drivers/mux/adgs1408.c
@@ -128,4 +128,4 @@ module_spi_driver(adgs1408_driver);
MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 1fc27e149e7f..3017ecf82ca5 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -702,7 +702,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int port;
int ret;
ret = bcm_sf2_sw_rst(priv);
@@ -714,14 +713,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
- for (port = 0; port < DSA_MAX_PORTS; port++) {
- if (dsa_is_user_port(ds, port))
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- }
-
- bcm_sf2_enable_acb(ds);
+ ds->ops->setup(ds);
return 0;
}
@@ -1172,10 +1164,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
- /* Disable all ports and interrupts */
priv->wol_ports_mask = 0;
- bcm_sf2_sw_suspend(priv->dev->ds);
dsa_unregister_switch(priv->dev->ds);
+ /* Disable all ports and interrupts */
+ bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4532e574ebcd..9f80b73f90b1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -32,115 +32,81 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
-enum ena_admin_aq_opcode {
- ENA_ADMIN_CREATE_SQ = 1,
-
- ENA_ADMIN_DESTROY_SQ = 2,
-
- ENA_ADMIN_CREATE_CQ = 3,
-
- ENA_ADMIN_DESTROY_CQ = 4,
- ENA_ADMIN_GET_FEATURE = 8,
-
- ENA_ADMIN_SET_FEATURE = 9,
-
- ENA_ADMIN_GET_STATS = 11,
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+ ENA_ADMIN_DESTROY_SQ = 2,
+ ENA_ADMIN_CREATE_CQ = 3,
+ ENA_ADMIN_DESTROY_CQ = 4,
+ ENA_ADMIN_GET_FEATURE = 8,
+ ENA_ADMIN_SET_FEATURE = 9,
+ ENA_ADMIN_GET_STATS = 11,
};
enum ena_admin_aq_completion_status {
- ENA_ADMIN_SUCCESS = 0,
-
- ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
-
- ENA_ADMIN_BAD_OPCODE = 2,
-
- ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
-
- ENA_ADMIN_MALFORMED_REQUEST = 4,
-
+ ENA_ADMIN_SUCCESS = 0,
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ ENA_ADMIN_BAD_OPCODE = 2,
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
/* Additional status is provided in ACQ entry extended_status */
- ENA_ADMIN_ILLEGAL_PARAMETER = 5,
-
- ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_RESOURCE_BUSY = 7,
};
enum ena_admin_aq_feature_id {
- ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
-
- ENA_ADMIN_MAX_QUEUES_NUM = 2,
-
- ENA_ADMIN_HW_HINTS = 3,
-
- ENA_ADMIN_RSS_HASH_FUNCTION = 10,
-
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
-
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
-
- ENA_ADMIN_MTU = 14,
-
- ENA_ADMIN_RSS_HASH_INPUT = 18,
-
- ENA_ADMIN_INTERRUPT_MODERATION = 20,
-
- ENA_ADMIN_AENQ_CONFIG = 26,
-
- ENA_ADMIN_LINK_CONFIG = 27,
-
- ENA_ADMIN_HOST_ATTR_CONFIG = 28,
-
- ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
+ ENA_ADMIN_LLQ = 4,
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_MTU = 14,
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+ ENA_ADMIN_AENQ_CONFIG = 26,
+ ENA_ADMIN_LINK_CONFIG = 27,
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */
- ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
-
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
/* descriptors and headers are in device memory (a.k.a Low Latency
* Queue)
*/
- ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
};
enum ena_admin_link_types {
- ENA_ADMIN_LINK_SPEED_1G = 0x1,
-
- ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
-
- ENA_ADMIN_LINK_SPEED_5G = 0x4,
-
- ENA_ADMIN_LINK_SPEED_10G = 0x8,
-
- ENA_ADMIN_LINK_SPEED_25G = 0x10,
-
- ENA_ADMIN_LINK_SPEED_40G = 0x20,
-
- ENA_ADMIN_LINK_SPEED_50G = 0x40,
-
- ENA_ADMIN_LINK_SPEED_100G = 0x80,
-
- ENA_ADMIN_LINK_SPEED_200G = 0x100,
-
- ENA_ADMIN_LINK_SPEED_400G = 0x200,
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
};
enum ena_admin_completion_policy_type {
/* completion queue entry for each sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
-
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
/* completion queue entry upon request in sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
-
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
/* current queue head pointer is updated in OS memory upon sq
* descriptor request
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
-
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
/* current queue head pointer is updated in OS memory for each sq
* descriptor
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
};
/* basic stats return ena_admin_basic_stats while extended stats return a
@@ -148,15 +114,13 @@ enum ena_admin_completion_policy_type {
* device id
*/
enum ena_admin_get_stats_type {
- ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
-
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
};
enum ena_admin_get_stats_scope {
- ENA_ADMIN_SPECIFIC_QUEUE = 0,
-
- ENA_ADMIN_ETH_TRAFFIC = 1,
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+ ENA_ADMIN_ETH_TRAFFIC = 1,
};
struct ena_admin_aq_common_desc {
@@ -227,7 +191,9 @@ struct ena_admin_acq_common_desc {
u16 extended_status;
- /* serves as a hint what AQ entries can be revoked */
+ /* indicates to the driver which AQ entry has been consumed by the
+ * device and could be reused
+ */
u16 sq_head_indx;
};
@@ -296,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd {
};
enum ena_admin_sq_direction {
- ENA_ADMIN_SQ_DIRECTION_TX = 1,
-
- ENA_ADMIN_SQ_DIRECTION_RX = 2,
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
};
struct ena_admin_acq_create_sq_resp_desc {
@@ -483,8 +448,85 @@ struct ena_admin_device_attr_feature_desc {
u32 max_mtu;
};
+enum ena_admin_llq_header_location {
+ /* header is in descriptor list */
+ ENA_ADMIN_INLINE_HEADER = 1,
+ /* header in a separate ring, implies 16B descriptor list entry */
+ ENA_ADMIN_HEADER_RING = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+ ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1,
+ ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2,
+ ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
+};
+
+/* A packet descriptor list entry always starts with one or more descriptors,
+ * followed by a header. The rest of the descriptors are located at the
+ * beginning of the subsequent entry. Stride refers to how the rest of the
+ * descriptors are placed. This field is relevant only for inline header
+ * mode.
+ */
+enum ena_admin_llq_stride_ctrl {
+ ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1,
+ ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
+};
+
+struct ena_admin_feature_llq_desc {
+ u32 max_llq_num;
+
+ u32 max_llq_depth;
+
+	/* the header locations the device supports; a bitfield of
+	 * enum ena_admin_llq_header_location.
+ */
+ u16 header_location_ctrl_supported;
+
+ /* the header location the driver selected to use. */
+ u16 header_location_ctrl_enabled;
+
+	/* if inline header is specified, this is the size of a descriptor
+	 * list entry; if a separate header ring is specified, this is the
+	 * size of a header ring entry. A bitfield of enum
+	 * ena_admin_llq_ring_entry_size giving the entry sizes the device
+	 * supports.
+	 */
+ u16 entry_size_ctrl_supported;
+
+ /* the entry size the driver selected to use. */
+ u16 entry_size_ctrl_enabled;
+
+	/* valid only if inline header is specified. The first entry
+	 * associated with the packet includes descriptors and header; the
+	 * rest of the entries are occupied by descriptors. This parameter
+	 * defines the max number of descriptors preceding the header in the
+	 * first entry. The field is a bitfield of enum
+	 * ena_admin_llq_num_descs_before_header and specifies the values
+	 * the device supports.
+	 */
+ u16 desc_num_before_header_supported;
+
+	/* the value the driver selected to use */
+ u16 desc_num_before_header_enabled;
+
+ /* valid only if inline was chosen. bitfield of enum
+ * ena_admin_llq_stride_ctrl
+ */
+ u16 descriptors_stride_ctrl_supported;
+
+ /* the stride control the driver selected to use */
+ u16 descriptors_stride_ctrl_enabled;
+};
+
struct ena_admin_queue_feature_desc {
- /* including LLQs */
u32 max_sq_num;
u32 max_sq_depth;
@@ -493,9 +535,9 @@ struct ena_admin_queue_feature_desc {
u32 max_cq_depth;
- u32 max_llq_num;
+ u32 max_legacy_llq_num;
- u32 max_llq_depth;
+ u32 max_legacy_llq_depth;
u32 max_header_size;
@@ -583,9 +625,8 @@ struct ena_admin_feature_offload_desc {
};
enum ena_admin_hash_functions {
- ENA_ADMIN_TOEPLITZ = 1,
-
- ENA_ADMIN_CRC32 = 2,
+ ENA_ADMIN_TOEPLITZ = 1,
+ ENA_ADMIN_CRC32 = 2,
};
struct ena_admin_feature_rss_flow_hash_control {
@@ -611,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function {
/* RSS flow hash protocols */
enum ena_admin_flow_hash_proto {
- ENA_ADMIN_RSS_TCP4 = 0,
-
- ENA_ADMIN_RSS_UDP4 = 1,
-
- ENA_ADMIN_RSS_TCP6 = 2,
-
- ENA_ADMIN_RSS_UDP6 = 3,
-
- ENA_ADMIN_RSS_IP4 = 4,
-
- ENA_ADMIN_RSS_IP6 = 5,
-
- ENA_ADMIN_RSS_IP4_FRAG = 6,
-
- ENA_ADMIN_RSS_NOT_IP = 7,
-
+ ENA_ADMIN_RSS_TCP4 = 0,
+ ENA_ADMIN_RSS_UDP4 = 1,
+ ENA_ADMIN_RSS_TCP6 = 2,
+ ENA_ADMIN_RSS_UDP6 = 3,
+ ENA_ADMIN_RSS_IP4 = 4,
+ ENA_ADMIN_RSS_IP6 = 5,
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+ ENA_ADMIN_RSS_NOT_IP = 7,
/* TCPv6 with extension header */
- ENA_ADMIN_RSS_TCP6_EX = 8,
-
+ ENA_ADMIN_RSS_TCP6_EX = 8,
/* IPv6 with extension header */
- ENA_ADMIN_RSS_IP6_EX = 9,
-
- ENA_ADMIN_RSS_PROTO_NUM = 16,
+ ENA_ADMIN_RSS_IP6_EX = 9,
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
};
/* RSS flow hash fields */
enum ena_admin_flow_hash_fields {
/* Ethernet Dest Addr */
- ENA_ADMIN_RSS_L2_DA = BIT(0),
-
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
/* Ethernet Src Addr */
- ENA_ADMIN_RSS_L2_SA = BIT(1),
-
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
/* ipv4/6 Dest Addr */
- ENA_ADMIN_RSS_L3_DA = BIT(2),
-
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
/* ipv4/6 Src Addr */
- ENA_ADMIN_RSS_L3_SA = BIT(3),
-
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
/* tcp/udp Dest Port */
- ENA_ADMIN_RSS_L4_DP = BIT(4),
-
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
/* tcp/udp Src Port */
- ENA_ADMIN_RSS_L4_SP = BIT(5),
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
};
struct ena_admin_proto_input {
@@ -693,15 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input {
};
enum ena_admin_os_type {
- ENA_ADMIN_OS_LINUX = 1,
-
- ENA_ADMIN_OS_WIN = 2,
-
- ENA_ADMIN_OS_DPDK = 3,
-
- ENA_ADMIN_OS_FREEBSD = 4,
-
- ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_LINUX = 1,
+ ENA_ADMIN_OS_WIN = 2,
+ ENA_ADMIN_OS_DPDK = 3,
+ ENA_ADMIN_OS_FREEBSD = 4,
+ ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_ESXI = 6,
+ ENA_ADMIN_OS_GROUPS_NUM = 6,
};
struct ena_admin_host_info {
@@ -723,11 +747,27 @@ struct ena_admin_host_info {
/* 7:0 : major
* 15:8 : minor
* 23:16 : sub_minor
+ * 31:24 : module_type
*/
u32 driver_version;
/* features bitmap */
- u32 supported_network_features[4];
+ u32 supported_network_features[2];
+
+ /* ENA spec version of driver */
+ u16 ena_spec_version;
+
+ /* ENA device's Bus, Device and Function
+ * 2:0 : function
+ * 7:3 : device
+ * 15:8 : bus
+ */
+ u16 bdf;
+
+ /* Number of CPUs */
+ u16 num_cpus;
+
+ u16 reserved;
};
struct ena_admin_rss_ind_table_entry {
@@ -800,6 +840,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_llq_desc llq;
+
struct ena_admin_queue_feature_desc max_queue;
struct ena_admin_feature_aenq_desc aenq;
@@ -847,6 +889,9 @@ struct ena_admin_set_feat_cmd {
/* rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
+
+ /* LLQ configuration */
+ struct ena_admin_feature_llq_desc llq;
} u;
};
@@ -875,25 +920,18 @@ struct ena_admin_aenq_common_desc {
/* asynchronous event notification groups */
enum ena_admin_aenq_group {
- ENA_ADMIN_LINK_CHANGE = 0,
-
- ENA_ADMIN_FATAL_ERROR = 1,
-
- ENA_ADMIN_WARNING = 2,
-
- ENA_ADMIN_NOTIFICATION = 3,
-
- ENA_ADMIN_KEEP_ALIVE = 4,
-
- ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+ ENA_ADMIN_LINK_CHANGE = 0,
+ ENA_ADMIN_FATAL_ERROR = 1,
+ ENA_ADMIN_WARNING = 2,
+ ENA_ADMIN_NOTIFICATION = 3,
+ ENA_ADMIN_KEEP_ALIVE = 4,
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
enum ena_admin_aenq_notification_syndrom {
- ENA_ADMIN_SUSPEND = 0,
-
- ENA_ADMIN_RESUME = 1,
-
- ENA_ADMIN_UPDATE_HINTS = 2,
+ ENA_ADMIN_SUSPEND = 0,
+ ENA_ADMIN_RESUME = 1,
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
struct ena_admin_aenq_entry {
@@ -928,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp {
};
/* aq_common_desc */
-#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
/* sq */
-#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
/* acq_common_desc */
-#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
/* aq_create_sq_cmd */
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
/* aq_create_cq_cmd */
@@ -957,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
/* get_set_feature_common_desc */
-#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
/* get_feature_link_desc */
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
/* feature_offload_desc */
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
@@ -974,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
/* feature_rss_flow_hash_function */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
@@ -994,25 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* feature_rss_flow_hash_input */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
/* host_info */
-#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
-#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
-#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
-#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
-#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24)
+#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
/* aenq_common_desc */
-#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
/* aenq_link_change_desc */
-#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
#endif /*_ENA_ADMIN_H_ */
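/*
 * Editor's note: a sketch of how a driver might pack the new host_info
 * bdf field using the shift/mask pairs defined above. The values are made
 * up, and the GENMASK()/BIT() results are expanded by hand so the example
 * builds in userspace.
 */
#include <stdint.h>
#include <stdio.h>

#define FUNCTION_MASK	0x0007u		/* GENMASK(2, 0) */
#define DEVICE_SHIFT	3
#define DEVICE_MASK	0x00f8u		/* GENMASK(7, 3) */
#define BUS_SHIFT	8
#define BUS_MASK	0xff00u		/* GENMASK(15, 8) */

int main(void)
{
	uint16_t bus = 0x3f, dev = 0x1d, fn = 0x5, bdf = 0;

	bdf |= fn & FUNCTION_MASK;
	bdf |= (uint16_t)(dev << DEVICE_SHIFT) & DEVICE_MASK;
	bdf |= (uint16_t)(bus << BUS_SHIFT) & BUS_MASK;
	printf("bdf = 0x%04x\n", (unsigned int)bdf);	/* 0x3fed */
	return 0;
}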
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7635c38e77dd..420cede41ca4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,9 +41,6 @@
#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
- | (ENA_COMMON_SPEC_VERSION_MINOR))
#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
@@ -61,6 +58,8 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
+
#define ENA_REGS_ADMIN_INTR_MASK 1
#define ENA_POLL_MS 5
@@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = atomic_read(&admin_queue->outstanding_cmds);
+ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
pr_debug("admin queue is full.\n");
admin_queue->stats.out_of_space++;
@@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
struct ena_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
- unsigned long flags;
+ unsigned long flags = 0;
struct ena_comp_ctx *comp_ctx;
spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -333,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
- io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
+ io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
sizeof(struct ena_eth_io_tx_desc) :
@@ -355,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
&io_sq->desc_addr.phys_addr,
GFP_KERNEL);
}
- } else {
+
+ if (!io_sq->desc_addr.virt_addr) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+ }
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Allocate bounce buffers */
+ io_sq->bounce_buf_ctrl.buffer_size =
+ ena_dev->llq_info.desc_list_entry_size;
+ io_sq->bounce_buf_ctrl.buffers_num =
+ ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+ size = io_sq->bounce_buf_ctrl.buffer_size *
+ io_sq->bounce_buf_ctrl.buffers_num;
+
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->desc_addr.virt_addr =
+ io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
- if (!io_sq->desc_addr.virt_addr) {
- io_sq->desc_addr.virt_addr =
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
+ pr_err("bounce buffer memory allocation failed");
+ return -ENOMEM;
}
- }
- if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed");
- return -ENOMEM;
+ memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+ sizeof(io_sq->llq_info));
+
+ /* Initiate the first bounce buffer */
+ io_sq->llq_buf_ctrl.curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, io_sq->llq_info.desc_list_entry_size);
+ io_sq->llq_buf_ctrl.descs_left_in_line =
+ io_sq->llq_info.descs_num_before_header;
}
io_sq->tail = 0;
@@ -460,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
/* Go over all the completions */
while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
- ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Do not read the rest of the completion entry before the
* phase bit was validated
*/
@@ -511,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags, timeout;
+ unsigned long flags = 0;
+ unsigned long timeout;
int ret;
timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -557,10 +584,160 @@ err:
return ret;
}
+/**
+ * Set the LLQ configurations of the firmware
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which, in turn, checks if they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+ int ret;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+ cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+ cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+ cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+ cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set LLQ configurations: %d\n", ret);
+
+ return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_cfg)
+{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+ u16 supported_feat;
+ int rc;
+
+ memset(llq_info, 0, sizeof(*llq_info));
+
+ supported_feat = llq_features->header_location_ctrl_supported;
+
+ if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+ llq_info->header_location_ctrl =
+ llq_default_cfg->llq_header_location;
+ } else {
+ pr_err("Invalid header location control, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+ supported_feat = llq_features->descriptors_stride_ctrl_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+ llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+ } else {
+ if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+ } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+ } else {
+ pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
+ }
+ } else {
+ llq_info->desc_stride_ctrl = 0;
+ }
+
+ supported_feat = llq_features->entry_size_ctrl_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+ llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+ llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+ } else {
+ if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_info->desc_list_entry_size = 128;
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+ llq_info->desc_list_entry_size = 192;
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+ llq_info->desc_list_entry_size = 256;
+ } else {
+ pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_ring_entry_size, supported_feat,
+ llq_info->desc_list_entry_size);
+ }
+ if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+		/* The desc list entry size should be a whole multiple of 8.
+		 * This requirement comes from __iowrite64_copy()
+ */
+ pr_err("illegal entry size %d\n",
+ llq_info->desc_list_entry_size);
+ return -EINVAL;
+ }
+
+ if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+ llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+ sizeof(struct ena_eth_io_tx_desc);
+ else
+ llq_info->descs_per_entry = 1;
+
+ supported_feat = llq_features->desc_num_before_header_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+ llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+ } else {
+ if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+ } else {
+ pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_num_decs_before_header,
+ supported_feat, llq_info->descs_num_before_header);
+ }
+
+ rc = ena_com_set_llq(ena_dev);
+ if (rc)
+ pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+ return 0;
+}
+
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags;
+ unsigned long flags = 0;
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
@@ -606,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
u32 mmio_read_reg, ret, i;
- unsigned long flags;
+ unsigned long flags = 0;
u32 timeout = mmio_read->reg_read_to;
might_sleep();
@@ -728,15 +905,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
- io_sq->desc_addr.phys_addr);
- else
- devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
}
+
+ if (io_sq->bounce_buf_ctrl.base_buffer) {
+ devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+ io_sq->bounce_buf_ctrl.base_buffer = NULL;
+ }
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -1248,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
@@ -1292,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
ena_dev->admin_queue.running_state = state;
@@ -1326,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
}
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
- pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+ pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
get_resp.u.aenq.supported_groups, groups_flag);
return -EOPNOTSUPP;
}
@@ -1400,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- if (ver < MIN_ENA_VER) {
- pr_err("ENA version is lower than the minimal version the driver supports\n");
- return -1;
- }
-
pr_info("ena controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -1479,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
sizeof(*mmio_read->read_resp),
&mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
- return -ENOMEM;
+ goto err;
ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
@@ -1488,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
mmio_read->readless_supported = true;
return 0;
+
+err:
+
+ return -ENOMEM;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1523,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
- struct ena_aenq_handlers *aenq_handlers,
- bool init_spinlock)
+ struct ena_aenq_handlers *aenq_handlers)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
@@ -1550,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
atomic_set(&admin_queue->outstanding_cmds, 0);
- if (init_spinlock)
- spin_lock_init(&admin_queue->q_lock);
+ spin_lock_init(&admin_queue->q_lock);
ret = ena_com_init_comp_ctxt(admin_queue);
if (ret)
@@ -1748,6 +1924,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+ if (!rc)
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+ sizeof(get_resp.u.llq));
+ else if (rc == -EOPNOTSUPP)
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ else
+ return rc;
+
return 0;
}
@@ -1779,6 +1964,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
+ unsigned long long timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
u8 phase;
@@ -1796,10 +1982,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
*/
dma_rmb();
+ timestamp =
+ (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrom,
- (u64)aenq_common->timestamp_low +
- ((u64)aenq_common->timestamp_high << 32));
+ aenq_common->group, aenq_common->syndrom, timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -2441,6 +2628,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
if (unlikely(!host_attr->host_info))
return -ENOMEM;
+ host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+ (ENA_COMMON_SPEC_VERSION_MINOR));
+
return 0;
}
@@ -2712,3 +2903,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
intr_moder_tbl[level].pkts_per_interval;
entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_cfg)
+{
+ int rc;
+ int size;
+
+ if (!llq_features->max_llq_num) {
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
+ if (rc)
+ return rc;
+
+ /* Validate the descriptor is not too big */
+ size = ena_dev->tx_max_header_size;
+ size += ena_dev->llq_info.descs_num_before_header *
+ sizeof(struct ena_eth_io_tx_desc);
+
+ if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ pr_err("the size of the LLQ entry is smaller than needed\n");
+ return -EINVAL;
+ }
+
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+ return 0;
+}
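/*
 * Editor's note: the LLQ negotiation above applies one pattern several
 * times: use the driver default if the device advertises it, otherwise
 * fall back to the best supported value, otherwise fail. A condensed
 * userspace sketch of one such negotiation plus the final entry-size
 * sanity check against header bytes and preceding descriptors (all
 * numbers here are hypothetical).
 */
#include <stdio.h>

#define ENTRY_128B 0x1
#define ENTRY_256B 0x4

static int pick(unsigned int supported, unsigned int preferred,
		unsigned int fallback)
{
	if (supported & preferred)
		return (int)preferred;
	if (supported & fallback)
		return (int)fallback;
	return -1;			/* nothing usable */
}

int main(void)
{
	unsigned int supported = ENTRY_256B;	/* device only does 256B */
	int choice = pick(supported, ENTRY_128B, ENTRY_256B);
	int entry_size = (choice == ENTRY_256B) ? 256 : 128;
	int needed = 96 /* max header */ + 2 * 16 /* preceding descs */;

	if (choice < 0 || entry_size < needed)
		return 1;	/* would fall back to host placement */
	printf("llq entry %dB fits %dB\n", entry_size, needed);
	return 0;
}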
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 7b784f8a06a6..ae8b4857fce3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
+#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
@@ -108,6 +109,14 @@ enum ena_intr_moder_level {
ENA_INTR_MAX_NUM_OF_LEVELS,
};
+struct ena_llq_configurations {
+ enum ena_admin_llq_header_location llq_header_location;
+ enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
+ enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
+ enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
+ u16 llq_ring_entry_size_value;
+};
+
struct ena_intr_moder_entry {
unsigned int intr_moder_interval;
unsigned int pkts_per_interval;
@@ -142,6 +151,15 @@ struct ena_com_tx_meta {
u16 l4_hdr_len; /* In words */
};
+struct ena_com_llq_info {
+ u16 header_location_ctrl;
+ u16 desc_stride_ctrl;
+ u16 desc_list_entry_size_ctrl;
+ u16 desc_list_entry_size;
+ u16 descs_num_before_header;
+ u16 descs_per_entry;
+};
+
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
@@ -179,6 +197,20 @@ struct ena_com_io_cq {
} ____cacheline_aligned;
+struct ena_com_io_bounce_buffer_control {
+ u8 *base_buffer;
+ u16 next_to_use;
+ u16 buffer_size;
+ u16 buffers_num; /* Must be a power of 2 */
+};
+
+/* This struct keeps track of the current location of the next llq entry */
+struct ena_com_llq_pkt_ctrl {
+ u8 *curr_bounce_buf;
+ u16 idx;
+ u16 descs_left_in_line;
+};
+
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
@@ -190,6 +222,9 @@ struct ena_com_io_sq {
u32 msix_vector;
struct ena_com_tx_meta cached_tx_meta;
+ struct ena_com_llq_info llq_info;
+ struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+ struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
u16 q_depth;
u16 qid;
@@ -197,6 +232,7 @@ struct ena_com_io_sq {
u16 idx;
u16 tail;
u16 next_to_comp;
+ u16 llq_last_copy_tail;
u32 tx_max_header_size;
u8 phase;
u8 desc_entry_size;
@@ -334,6 +370,8 @@ struct ena_com_dev {
u16 intr_delay_resolution;
u32 intr_moder_tx_interval;
struct ena_intr_moder_entry *intr_moder_tbl;
+
+ struct ena_com_llq_info llq_info;
};
struct ena_com_dev_get_features_ctx {
@@ -342,6 +380,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
struct ena_admin_ena_hw_hints hw_hints;
+ struct ena_admin_feature_llq_desc llq;
};
struct ena_com_create_io_ctx {
@@ -397,8 +436,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
/* ena_com_admin_init - Init the admin and the async queues
* @ena_dev: ENA communication layer struct
* @aenq_handlers: Those handlers to be called upon event.
- * @init_spinlock: Indicate if this method should init the admin spinlock or
- * the spinlock was init before (for example, in a case of FLR).
*
* Initialize the admin submission and completion queues.
* Initialize the asynchronous events notification queues.
@@ -406,8 +443,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
* @return - 0 on success, negative value on failure.
*/
int ena_com_admin_init(struct ena_com_dev *ena_dev,
- struct ena_aenq_handlers *aenq_handlers,
- bool init_spinlock);
+ struct ena_aenq_handlers *aenq_handlers);
/* ena_com_admin_destroy - Destroy the admin and the async events queues.
* @ena_dev: ENA communication layer struct
@@ -935,6 +971,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq_features: LLQ feature descriptor, retrieve via
+ * ena_com_get_dev_attr_feat.
+ * @ena_llq_config: The default driver LLQ parameters configurations
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_config);
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
@@ -1044,4 +1090,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+ u16 size, buffers_num;
+ u8 *buf;
+
+ size = bounce_buf_ctrl->buffer_size;
+ buffers_num = bounce_buf_ctrl->buffers_num;
+
+ buf = bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
+ prefetchw(bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+ return buf;
+}
+
#endif /* !(ENA_COM) */
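/*
 * Editor's note: ena_com_get_next_bounce_buffer() above relies on
 * buffers_num being a power of two so that `next_to_use & (num - 1)` wraps
 * for free. A standalone sketch of that ring indexing (the prefetchw() of
 * the following buffer is kernel-only and omitted here).
 */
#include <stdint.h>
#include <stdio.h>

#define BUF_NUM		4	/* must be a power of two */
#define BUF_SIZE	8

static uint8_t pool[BUF_NUM * BUF_SIZE];
static uint16_t next_to_use;

static uint8_t *next_bounce_buffer(void)
{
	return pool + (uint16_t)(next_to_use++ & (BUF_NUM - 1)) * BUF_SIZE;
}

int main(void)
{
	for (int i = 0; i < 6; i++)	/* wraps back after four buffers */
		printf("buf %d at offset %d\n", i,
		       (int)(next_bounce_buffer() - pool));
	return 0;
}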
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index bb8d73676eab..23beb7e7ed7b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -32,8 +32,8 @@
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
-#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
-#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 2
+#define ENA_COMMON_SPEC_VERSION_MINOR 0
/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
struct ena_common_mem_addr {
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 1c682b76190f..f6c2d3855be8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,16 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
return cdesc;
}
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
- io_cq->head++;
-
- /* Switch phase bit in case of wrap around */
- if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
- io_cq->phase ^= 1;
-}
-
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
u16 tail_masked;
u32 offset;
@@ -80,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ u8 *bounce_buffer)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u32 offset = tail_masked * io_sq->desc_entry_size;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
- /* In case this queue isn't a LLQ */
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- return;
+ u16 dst_tail_mask;
+ u32 dst_offset;
- memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
- io_sq->desc_addr.virt_addr + offset,
- io_sq->desc_entry_size);
-}
+ dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+ dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+ /* Make sure everything was written into the bounce buffer before
+ * writing the bounce buffer to the device
+ */
+ wmb();
+
+ /* The line is completed. Copy it to dev */
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+ bounce_buffer, (llq_info->desc_list_entry_size) / 8);
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
io_sq->tail++;
/* Switch phase bit in case of wrap around */
if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
io_sq->phase ^= 1;
+
+ return 0;
}
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
- u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+ u8 *header_src,
+ u16 header_len)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u8 __iomem *dev_head_addr =
- io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+ u16 header_offset;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
return 0;
- if (unlikely(!io_sq->header_addr)) {
- pr_err("Push buffer header ptr is NULL\n");
- return -EINVAL;
+ header_offset =
+ llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+ if (unlikely((header_offset + header_len) >
+ llq_info->desc_list_entry_size)) {
+ pr_err("trying to write header larger than llq entry can accommodate\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(!bounce_buffer)) {
+ pr_err("bounce buffer is NULL\n");
+ return -EFAULT;
+ }
+
+ memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+ return 0;
+}
+
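[Editor's illustration] ena_com_write_header_to_bounce() places the packet header inside the same LLQ line as the descriptors, at an offset of descs_num_before_header * desc_entry_size, and rejects headers that would spill past the line. A self-contained sketch of that offset and bounds check, using made-up constants rather than the device's negotiated values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_ENTRY_SIZE       16   /* illustrative, not negotiated values */
#define DESCS_BEFORE_HEADER    2
#define LIST_ENTRY_SIZE      128   /* one whole LLQ line */

static int write_header(uint8_t *line, const uint8_t *hdr, uint16_t hdr_len)
{
	uint16_t off = DESCS_BEFORE_HEADER * DESC_ENTRY_SIZE;

	/* the header must fit in what remains of the LLQ line */
	if (off + hdr_len > LIST_ENTRY_SIZE)
		return -EFAULT;
	memcpy(line + off, hdr, hdr_len);
	return 0;
}

int main(void)
{
	uint8_t line[LIST_ENTRY_SIZE] = { 0 }, hdr[120] = { 0xab };

	printf("64B header:  %d\n", write_header(line, hdr, 64));  /* fits -> 0 */
	printf("120B header: %d\n", write_header(line, hdr, 120)); /* -EFAULT */
	return 0;
}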
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ u8 *bounce_buffer;
+ void *sq_desc;
+
+ bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+ if (unlikely(!bounce_buffer)) {
+ pr_err("bounce buffer is NULL\n");
+ return NULL;
+ }
+
+ sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+ pkt_ctrl->idx++;
+ pkt_ctrl->descs_left_in_line--;
+
+ return sq_desc;
+}
+
+static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ int rc;
+
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+ return 0;
+
+ /* bounce buffer was used, so write it and get a new one */
+ if (pkt_ctrl->idx) {
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+ if (unlikely(rc))
+ return rc;
+
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+ }
+
+ pkt_ctrl->idx = 0;
+ pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+ return 0;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ return get_sq_desc_llq(io_sq);
+
+ return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ int rc;
+
+ if (!pkt_ctrl->descs_left_in_line) {
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+ if (unlikely(rc))
+ return rc;
+
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+
+ pkt_ctrl->idx = 0;
+ if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
+ pkt_ctrl->descs_left_in_line = 1;
+ else
+ pkt_ctrl->descs_left_in_line =
+ llq_info->desc_list_entry_size / io_sq->desc_entry_size;
}
- memcpy_toio(dev_head_addr, head_src, header_len);
+ return 0;
+}
+
+static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ return ena_com_sq_update_llq_tail(io_sq);
+
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
return 0;
}
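[Editor's illustration] Both the submission and completion rings rely on the same trick: the low bits of tail (or head) index a power-of-two ring, and a one-bit phase flips on every wrap so a consumer can tell fresh entries from stale ones without a shared counter. A compact, runnable sketch of the wrap and phase arithmetic:

#include <stdint.h>
#include <stdio.h>

struct ring_ptr {
	uint16_t tail;
	uint16_t depth;   /* power of two */
	uint8_t phase;
};

static void update_tail(struct ring_ptr *q)
{
	q->tail++;
	/* low bits hit zero again: we wrapped, so flip the phase bit */
	if ((q->tail & (q->depth - 1)) == 0)
		q->phase ^= 1;
}

int main(void)
{
	struct ring_ptr q = { .depth = 4, .phase = 1 };
	int i;

	for (i = 0; i < 9; i++) {
		printf("tail=%u idx=%u phase=%u\n",
		       q.tail, q.tail & (q.depth - 1), q.phase);
		update_tail(&q);
	}
	return 0;
}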
@@ -186,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
return false;
}
-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -232,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
memcpy(&io_sq->cached_tx_meta, ena_meta,
sizeof(struct ena_com_tx_meta));
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ return ena_com_sq_update_tail(io_sq);
}
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -245,11 +349,14 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
ena_rx_ctx->l4_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
+ ena_rx_ctx->l4_csum_checked =
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
ena_rx_ctx->hash = cdesc->hash;
ena_rx_ctx->frag =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
@@ -271,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_tx_desc *desc = NULL;
struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
- void *push_header = ena_tx_ctx->push_header;
+ void *buffer_to_push = ena_tx_ctx->push_header;
u16 header_len = ena_tx_ctx->header_len;
u16 num_bufs = ena_tx_ctx->num_bufs;
- int total_desc, i, rc;
+ u16 start_tail = io_sq->tail;
+ int i, rc;
bool have_meta;
u64 addr_hi;
WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
/* num_bufs +1 for potential meta desc */
- if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
- pr_err("Not enough space in the tx queue\n");
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+ pr_debug("Not enough space in the tx queue\n");
return -ENOMEM;
}
@@ -292,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
return -EINVAL;
}
- /* start with pushing the header (if needed) */
- rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+ !buffer_to_push))
+ return -EINVAL;
+
+ rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
return rc;
have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
ena_tx_ctx);
- if (have_meta)
- ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (have_meta) {
+ rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (unlikely(rc))
+ return rc;
+ }
- /* If the caller doesn't want send packets */
+ /* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
- *nb_hw_desc = have_meta ? 0 : 1;
- return 0;
+ rc = ena_com_close_bounce_buffer(io_sq);
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
/* Set first desc when we don't have meta descriptor */
@@ -360,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
for (i = 0; i < num_bufs; i++) {
/* The first desc shares the same desc as the header */
if (likely(i != 0)) {
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
desc->len_ctrl |= (io_sq->phase <<
@@ -386,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* set the last desc indicator */
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;
- total_desc = max_t(u16, num_bufs, 1);
- total_desc += have_meta ? 1 : 0;
+ rc = ena_com_close_bounce_buffer(io_sq);
- *nb_hw_desc = total_desc;
- return 0;
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -453,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return -ENOSPC;
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
desc->length = ena_buf->len;
- desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -472,43 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
- ena_com_sq_update_tail(io_sq);
-
- return 0;
-}
-
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
- u8 expected_phase, cdesc_phase;
- struct ena_eth_io_tx_cdesc *cdesc;
- u16 masked_head;
-
- masked_head = io_cq->head & (io_cq->q_depth - 1);
- expected_phase = io_cq->phase;
-
- cdesc = (struct ena_eth_io_tx_cdesc *)
- ((uintptr_t)io_cq->cdesc_addr.virt_addr +
- (masked_head * io_cq->cdesc_entry_size_in_bytes));
-
- /* When the current completion descriptor phase isn't the same as the
- * expected, it mean that the device still didn't update
- * this completion.
- */
- cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
- if (cdesc_phase != expected_phase)
- return -EAGAIN;
-
- dma_rmb();
- if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
- pr_err("Invalid req id %d\n", cdesc->req_id);
- return -EINVAL;
- }
-
- ena_com_cq_inc_head(io_cq);
-
- *req_id = READ_ONCE(cdesc->req_id);
-
- return 0;
+ return ena_com_sq_update_tail(io_sq);
}
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..340d02b64ca6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -67,6 +67,7 @@ struct ena_com_rx_ctx {
enum ena_eth_io_l4_proto_index l4_proto;
bool l3_csum_err;
bool l4_csum_err;
+ u8 l4_csum_checked;
/* fragmented packet */
bool frag;
u32 hash;
@@ -86,8 +87,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
struct ena_com_buf *ena_buf,
u16 req_id);
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -96,7 +95,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
writel(intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -107,11 +106,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+ u16 required_buffers)
{
- u16 tail;
+ int temp;
- tail = io_sq->tail;
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return ena_com_free_desc(io_sq) >= required_buffers;
+
+ /* This calculation doesn't need to be 100% accurate, so to reduce
+ * the calculation overhead just subtract 2 lines from the free descs:
+ * one for the header line and one to compensate for the rounding
+ * down of the division.
+ */
+ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+ return ena_com_free_desc(io_sq) > temp;
+}
+
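[Editor's illustration] In LLQ mode several descriptors share one device line, so the space check divides the requested buffer count by descs_per_entry and pads by two lines, as the comment above describes. A sketch of the two code paths, with free_desc() inlined from the helper above (all values illustrative):

#include <stdbool.h>
#include <stdio.h>

/* q_depth - 1 - outstanding entries, as in ena_com_free_desc() */
static int free_desc(int q_depth, int tail, int next_to_comp)
{
	return q_depth - 1 - (tail - next_to_comp);
}

static bool have_enough_space(bool llq, int q_depth, int tail,
			      int next_to_comp, int required_buffers,
			      int descs_per_entry)
{
	if (!llq)
		return free_desc(q_depth, tail, next_to_comp) >= required_buffers;

	/* pad by one line for the header and one for the rounded-down division */
	return free_desc(q_depth, tail, next_to_comp) >
	       required_buffers / descs_per_entry + 2;
}

int main(void)
{
	/* 30 free lines hold ~120 descs at 4 per line: enough for 100 in LLQ,
	 * but not enough as 30 host-mode descriptors
	 */
	printf("llq:  %d\n", have_enough_space(true, 1024, 993, 0, 100, 4));
	printf("host: %d\n", have_enough_space(false, 1024, 993, 0, 100, 4));
	return 0;
}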
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail = io_sq->tail;
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
@@ -159,4 +175,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
io_sq->next_to_comp += elem;
}
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+ u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ /* When the current completion descriptor phase isn't the same as the
+ * expected phase, it means that the device has not yet updated
+ * this completion.
+ */
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return -EAGAIN;
+
+ dma_rmb();
+
+ *req_id = READ_ONCE(cdesc->req_id);
+ if (unlikely(*req_id >= io_cq->q_depth)) {
+ pr_err("Invalid req id %d\n", cdesc->req_id);
+ return -EINVAL;
+ }
+
+ ena_com_cq_inc_head(io_cq);
+
+ return 0;
+}
+
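[Editor's illustration] ena_com_tx_comp_req_id_get() polls the next completion slot and uses the phase bit as the "ready" flag: a mismatch means the device has not produced that entry yet, hence -EAGAIN. The sketch below mirrors the control flow in plain user-space C; the real code additionally orders the reads with READ_ONCE()/dma_rmb() and bounds-checks req_id, which a single-threaded sketch cannot demonstrate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PHASE_MASK 0x1

struct tx_cdesc {
	uint16_t req_id;
	uint8_t flags;    /* bit 0: phase */
};

struct cq {
	struct tx_cdesc *ring;
	uint16_t head, depth;   /* depth: power of two */
	uint8_t phase;
};

/* returns true and advances head once the device has written the entry */
static bool comp_req_id_get(struct cq *cq, uint16_t *req_id)
{
	struct tx_cdesc *c = &cq->ring[cq->head & (cq->depth - 1)];

	if ((c->flags & PHASE_MASK) != cq->phase)
		return false;   /* entry not produced yet, try again later */

	*req_id = c->req_id;
	cq->head++;
	if ((cq->head & (cq->depth - 1)) == 0)
		cq->phase ^= 1;
	return true;
}

int main(void)
{
	struct tx_cdesc ring[4] = { { .req_id = 7, .flags = 1 } };
	struct cq cq = { .ring = ring, .depth = 4, .phase = 1 };
	uint16_t id = 0;

	printf("slot 0: %s\n", comp_req_id_get(&cq, &id) ? "done" : "pending");
	printf("slot 1: %s\n", comp_req_id_get(&cq, &id) ? "done" : "pending");
	return 0;
}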
#endif /* ENA_ETH_COM_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index f320c58793a5..00e0f056a741 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -33,25 +33,18 @@
#define _ENA_ETH_IO_H_
enum ena_eth_io_l3_proto_index {
- ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
-
- ENA_ETH_IO_L3_PROTO_IPV4 = 8,
-
- ENA_ETH_IO_L3_PROTO_IPV6 = 11,
-
- ENA_ETH_IO_L3_PROTO_FCOE = 21,
-
- ENA_ETH_IO_L3_PROTO_ROCE = 22,
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
};
enum ena_eth_io_l4_proto_index {
- ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
-
- ENA_ETH_IO_L4_PROTO_TCP = 12,
-
- ENA_ETH_IO_L4_PROTO_UDP = 13,
-
- ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
};
struct ena_eth_io_tx_desc {
@@ -242,9 +235,13 @@ struct ena_eth_io_rx_cdesc_base {
* checksum error detected, or, the controller didn't
* validate the checksum. This bit is valid only when
* l4_proto_idx indicates TCP/UDP packet, and,
- * ipv4_frag is not set
+ * ipv4_frag is not set. This bit is valid only when
+ * l4_csum_checked below is set.
* 15 : ipv4_frag - Indicates IPv4 fragmented packet
- * 23:16 : reserved16
+ * 16 : l4_csum_checked - L4 checksum was verified
+ * (could be OK or error); when cleared, the status of
+ * the checksum is unknown
+ * 23:17 : reserved17 - MBZ
* 24 : phase
* 25 : l3_csum2 - second checksum engine result
* 26 : first - Indicates first descriptor in
@@ -303,114 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg {
};
/* tx_desc */
-#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
-#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
-#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
-#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
-#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
-#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
-#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
-#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
-#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
-#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
-#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
-#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
-#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
-#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
-#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
-#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
-#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
-#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
-#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
-#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
-#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
-#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
-#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
-#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
-#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
/* tx_meta_desc */
-#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
-#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
-#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
-#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
-#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
-#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
-#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
-#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
-#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
-#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
-#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
-#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
-#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
-#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
-#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
-#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
-#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
-#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
-#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
/* tx_cdesc */
-#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
/* rx_desc */
-#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
-#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
-#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
-#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
-#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
-#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
-#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
/* rx_cdesc_base */
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
-#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
-#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
-#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
-#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
-#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
-#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
-#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
-#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
-#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
-#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
-#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
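[Editor's illustration] Every field in the status word follows the same (status & MASK) >> SHIFT pattern, and the driver's !!(...) collapses the extracted value to 0/1. A standalone extraction example for the new l4_csum_checked bit alongside l4_csum_err, with local copies of the BIT() values above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                 (1u << (n))
#define L4_CSUM_ERR_SHIFT      14
#define L4_CSUM_ERR_MASK       BIT(14)
#define L4_CSUM_CHECKED_SHIFT  16
#define L4_CSUM_CHECKED_MASK   BIT(16)

int main(void)
{
	uint32_t status = BIT(16);   /* checksum verified, no error */

	/* !! folds the shifted field into a clean boolean */
	bool err = !!((status & L4_CSUM_ERR_MASK) >> L4_CSUM_ERR_SHIFT);
	bool checked = !!((status & L4_CSUM_CHECKED_MASK) >> L4_CSUM_CHECKED_SHIFT);

	printf("l4_csum_checked=%d l4_csum_err=%d\n", checked, err);
	return 0;
}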
/* intr_reg */
-#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
-#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
-#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
-#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
-#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
/* numa_node_cfg_reg */
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
#endif /*_ENA_ETH_IO_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 521607bc4393..f3a5a384e6e8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(doorbells),
ENA_STAT_TX_ENTRY(prepare_ctx_err),
ENA_STAT_TX_ENTRY(bad_req_id),
+ ENA_STAT_TX_ENTRY(llq_buffer_copy),
ENA_STAT_TX_ENTRY(missed_tx),
};
@@ -96,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
+ ENA_STAT_RX_ENTRY(csum_unchecked),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 13eb6a4d98d5..284a0a612131 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -237,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
}
}
+ size = tx_ring->tx_max_header_size;
+ tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ tx_ring->push_buf_intermediate_buf = vzalloc(size);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ vfree(tx_ring->tx_buffer_info);
+ vfree(tx_ring->free_tx_ids);
+ return -ENOMEM;
+ }
+ }
+
/* Req id ring for TX out of order completions */
for (i = 0; i < tx_ring->ring_size; i++)
tx_ring->free_tx_ids[i] = i;
@@ -265,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
vfree(tx_ring->free_tx_ids);
tx_ring->free_tx_ids = NULL;
+
+ vfree(tx_ring->push_buf_intermediate_buf);
+ tx_ring->push_buf_intermediate_buf = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
@@ -602,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
+static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
+{
+ struct ena_com_buf *ena_buf;
+ u32 cnt;
+ int i;
+
+ ena_buf = tx_info->bufs;
+ cnt = tx_info->num_of_bufs;
+
+ if (unlikely(!cnt))
+ return;
+
+ if (tx_info->map_linear_data) {
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+ ena_buf++;
+ cnt--;
+ }
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < cnt; i++) {
+ dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+ ena_buf++;
+ }
+}
+
/* ena_free_tx_bufs - Free Tx Buffers per Queue
* @tx_ring: TX ring for which buffers be freed
*/
@@ -612,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
- struct ena_com_buf *ena_buf;
- int nr_frags;
- int j;
if (!tx_info->skb)
continue;
@@ -630,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring->qid, i);
}
- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (j = 0; j < nr_frags; j++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
}
@@ -735,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
while (tx_pkts < budget) {
struct ena_tx_buffer *tx_info;
struct sk_buff *skb;
- struct ena_com_buf *ena_buf;
- int i, nr_frags;
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
@@ -756,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_info->skb = NULL;
tx_info->last_jiffies = 0;
- if (likely(tx_info->num_of_bufs != 0)) {
- ena_buf = tx_info->bufs;
-
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (i = 0; i < nr_frags; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -804,12 +812,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
*/
smp_mb();
- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh =
+ ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (netif_tx_queue_stopped(txq) && above_thresh) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
@@ -985,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
return;
}
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (likely(ena_rx_ctx->l4_csum_checked)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.csum_unchecked++;
+ u64_stats_update_end(&rx_ring->syncp);
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
}
+
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
@@ -1101,8 +1121,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->next_to_clean = next_to_clean;
- refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
- refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
+ refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
+ refill_threshold =
+ min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
+ ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
if (refill_required > refill_threshold) {
@@ -1299,7 +1321,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
-
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1574,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
if (rc)
return rc;
- ena_init_napi(adapter);
-
ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
ena_refill_all_rx_bufs(adapter);
@@ -1592,7 +1611,7 @@ static int ena_up_complete(struct ena_adapter *adapter)
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_com_dev *ena_dev;
struct ena_ring *tx_ring;
u32 msix_vector;
@@ -1605,6 +1624,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_TXQ_IDX(qid);
+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
ctx.qid = ena_qid;
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
@@ -1658,7 +1679,7 @@ create_err:
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
struct ena_com_dev *ena_dev;
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_ring *rx_ring;
u32 msix_vector;
u16 ena_qid;
@@ -1670,6 +1691,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_RXQ_IDX(qid);
+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.qid = ena_qid;
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -1729,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter)
ena_setup_io_intr(adapter);
+ /* napi poll functions should be initialized before running
+ * request_irq(), to handle a rare condition where there is a pending
+ * interrupt, causing the ISR to fire immediately while the poll
+ * function isn't set yet, which would cause a NULL dereference
+ */
+ ena_init_napi(adapter);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -1980,73 +2010,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
return rc;
}
-/* Called with netif_tx_lock. */
-static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ena_tx_map_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct sk_buff *skb,
+ void **push_hdr,
+ u16 *header_len)
{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_tx_buffer *tx_info;
- struct ena_com_tx_ctx ena_tx_ctx;
- struct ena_ring *tx_ring;
- struct netdev_queue *txq;
+ struct ena_adapter *adapter = tx_ring->adapter;
struct ena_com_buf *ena_buf;
- void *push_hdr;
- u32 len, last_frag;
- u16 next_to_use;
- u16 req_id;
- u16 push_len;
- u16 header_len;
dma_addr_t dma;
- int qid, rc, nb_hw_desc;
- int i = -1;
-
- netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
- /* Determine which tx ring we will be placed on */
- qid = skb_get_queue_mapping(skb);
- tx_ring = &adapter->tx_ring[qid];
- txq = netdev_get_tx_queue(dev, qid);
-
- rc = ena_check_and_linearize_skb(tx_ring, skb);
- if (unlikely(rc))
- goto error_drop_packet;
-
- skb_tx_timestamp(skb);
- len = skb_headlen(skb);
-
- next_to_use = tx_ring->next_to_use;
- req_id = tx_ring->free_tx_ids[next_to_use];
- tx_info = &tx_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
+ u32 skb_head_len, frag_len, last_frag;
+ u16 push_len = 0;
+ u16 delta = 0;
+ int i = 0;
- WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
- ena_buf = tx_info->bufs;
+ skb_head_len = skb_headlen(skb);
tx_info->skb = skb;
+ ena_buf = tx_info->bufs;
if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* prepared the push buffer */
- push_len = min_t(u32, len, tx_ring->tx_max_header_size);
- header_len = push_len;
- push_hdr = skb->data;
+ /* When the device is in LLQ mode, the driver will copy
+ * the header into the device memory space.
+ * The ena_com layer assumes the header is in a linear
+ * memory space.
+ * This assumption might be wrong since part of the header
+ * can be in the fragmented buffers.
+ * Use skb_header_pointer to make sure the header is in a
+ * linear memory space.
+ */
+
+ push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
+ *push_hdr = skb_header_pointer(skb, 0, push_len,
+ tx_ring->push_buf_intermediate_buf);
+ *header_len = push_len;
+ if (unlikely(skb->data != *push_hdr)) {
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.llq_buffer_copy++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ delta = push_len - skb_head_len;
+ }
} else {
- push_len = 0;
- header_len = min_t(u32, len, tx_ring->tx_max_header_size);
- push_hdr = NULL;
+ *push_hdr = NULL;
+ *header_len = min_t(u32, skb_head_len,
+ tx_ring->tx_max_header_size);
}
- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
- push_hdr, push_len);
+ *push_hdr, push_len);
- if (len > push_len) {
+ if (skb_head_len > push_len) {
dma = dma_map_single(tx_ring->dev, skb->data + push_len,
- len - push_len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ skb_head_len - push_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;
ena_buf->paddr = dma;
- ena_buf->len = len - push_len;
+ ena_buf->len = skb_head_len - push_len;
ena_buf++;
tx_info->num_of_bufs++;
+ tx_info->map_linear_data = 1;
+ } else {
+ tx_info->map_linear_data = 0;
}
last_frag = skb_shinfo(skb)->nr_frags;
@@ -2054,18 +2081,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < last_frag; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = skb_frag_size(frag);
- dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ frag_len = skb_frag_size(frag);
+
+ if (unlikely(delta >= frag_len)) {
+ delta -= frag_len;
+ continue;
+ }
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
+ frag_len - delta, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;
ena_buf->paddr = dma;
- ena_buf->len = len;
+ ena_buf->len = frag_len - delta;
ena_buf++;
+ tx_info->num_of_bufs++;
+ delta = 0;
}
- tx_info->num_of_bufs += last_frag;
+ return 0;
+
+error_report_dma_error:
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map skb\n");
+
+ tx_info->skb = NULL;
+
+ tx_info->num_of_bufs += i;
+ ena_unmap_tx_skb(tx_ring, tx_info);
+
+ return -EINVAL;
+}
+
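[Editor's illustration] The interesting case in ena_tx_map_skb() is a header that straddles the linear part and the first fragment: skb_header_pointer() then gathers push_len bytes into push_buf_intermediate_buf, and delta = push_len - skb_head_len bytes of the first fragment must be skipped when DMA-mapping. A user-space sketch of that gather and the resulting delta, simplified to a single fragment:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* gather the first 'want' bytes from a linear part plus one fragment */
static const uint8_t *header_ptr(const uint8_t *lin, size_t lin_len,
				 const uint8_t *frag, size_t want,
				 uint8_t *scratch)
{
	if (want <= lin_len)
		return lin;             /* already linear, no copy needed */
	memcpy(scratch, lin, lin_len);
	memcpy(scratch + lin_len, frag, want - lin_len);
	return scratch;
}

int main(void)
{
	uint8_t lin[8] = "ABCDEFG", frag[8] = "HIJKLMN", scratch[16];
	size_t want = 12, lin_len = sizeof(lin);
	const uint8_t *hdr = header_ptr(lin, lin_len, frag, want, scratch);

	/* bytes already pushed from the fragment must not be mapped again */
	size_t delta = (hdr == scratch) ? want - lin_len : 0;

	printf("copied=%d delta=%zu\n", hdr == scratch, delta);
	return 0;
}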
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_ring *tx_ring;
+ struct netdev_queue *txq;
+ void *push_hdr;
+ u16 next_to_use, req_id, header_len;
+ int qid, rc, nb_hw_desc;
+
+ netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+ /* Determine which tx ring we will be placed on */
+ qid = skb_get_queue_mapping(skb);
+ tx_ring = &adapter->tx_ring[qid];
+ txq = netdev_get_tx_queue(dev, qid);
+
+ rc = ena_check_and_linearize_skb(tx_ring, skb);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ skb_tx_timestamp(skb);
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_tx_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+
+ rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
+ if (unlikely(rc))
+ goto error_drop_packet;
memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
ena_tx_ctx.ena_bufs = tx_info->bufs;
@@ -2081,14 +2165,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
&nb_hw_desc);
+ /* ena_com_prepare_tx() can't fail due to a tx queue overflow,
+ * since the number of free descriptors in the queue is checked
+ * after sending the previous packet. If there isn't enough
+ * space in the queue for the next packet, the queue is stopped
+ * until enough space becomes available again.
+ * All other failure reasons of ena_com_prepare_tx() are fatal
+ * and therefore require a device reset.
+ */
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
"failed to prepare tx bufs\n");
u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_stop++;
tx_ring->tx_stats.prepare_ctx_err++;
u64_stats_update_end(&tx_ring->syncp);
- netif_tx_stop_queue(txq);
+ adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
goto error_unmap_dma;
}
@@ -2110,8 +2202,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
*/
- if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
- (tx_ring->sgl_size + 2))) {
+ if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ tx_ring->sgl_size + 2))) {
netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
__func__, qid);
@@ -2130,8 +2222,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_mb();
- if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
- > ENA_TX_WAKEUP_THRESH) {
+ if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.queue_wakeup++;
@@ -2151,35 +2243,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
-error_report_dma_error:
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&tx_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map skb\n");
-
- tx_info->skb = NULL;
-
error_unmap_dma:
- if (i >= 0) {
- /* save value of frag that failed */
- last_frag = i;
-
- /* start back at beginning and unmap skb */
- tx_info->skb = NULL;
- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- for (i = 0; i < last_frag; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
+ tx_info->skb = NULL;
error_drop_packet:
-
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2201,7 +2269,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
return qid;
}
-static void ena_config_host_info(struct ena_com_dev *ena_dev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev,
+ struct pci_dev *pdev)
{
struct ena_admin_host_info *host_info;
int rc;
@@ -2215,6 +2284,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
host_info = ena_dev->host_attr.host_info;
+ host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
strncpy(host_info->kernel_ver_str, utsname()->version,
@@ -2225,7 +2295,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
host_info->driver_version =
(DRV_MODULE_VER_MAJOR) |
(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
- (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+ (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
+ ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
+ host_info->num_cpus = num_online_cpus();
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -2436,7 +2508,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
}
/* ENA admin level init */
- rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
dev_err(dev,
"Can not initialize ena admin queue with device\n");
@@ -2449,7 +2521,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*/
ena_com_set_admin_polling_mode(ena_dev, true);
- ena_config_host_info(ena_dev);
+ ena_config_host_info(ena_dev, pdev);
/* Get Device Attributes*/
rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
@@ -2534,15 +2606,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
-
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
- /* Before releasing the ENA resources, a device reset is required.
- * (to prevent the device from accessing them).
+ /* Stop the device from sending AENQ events before releasing the ENA
+ * resources.
* In case the reset flag is set and the device is up, ena_down()
* already performs the reset, so it can be skipped.
*/
@@ -2611,14 +2682,20 @@ static int ena_restore_device(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_err(&pdev->dev, "Device reset completed successfully\n");
+ dev_err(&pdev->dev,
+ "Device reset completed successfully, Driver info: %s\n",
+ version);
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_device_destroy:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -2800,7 +2877,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
rx_ring = &adapter->rx_ring[i];
refill_required =
- ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ ena_com_free_desc(rx_ring->ena_com_io_sq);
if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring->empty_rx_queue++;
@@ -2946,7 +3023,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
/* In case of LLQ use the llq number in the get feature cmd */
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq_num = get_feat_ctx->max_queues.max_llq_num;
+ io_sq_num = get_feat_ctx->max_queues.max_legacy_llq_num;
if (io_sq_num == 0) {
dev_err(&pdev->dev,
@@ -2974,18 +3051,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
return io_queue_num;
}
-static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq,
+ struct ena_llq_configurations *llq_default_configurations)
{
bool has_mem_bar;
+ int rc;
+ u32 llq_feature_mask;
+
+ llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+ if (!(ena_dev->supported_features & llq_feature_mask)) {
+ dev_err(&pdev->dev,
+ "LLQ is not supported Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
- /* Enable push mode if device supports LLQ */
- if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- else
+ rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+ if (unlikely(rc)) {
+ dev_err(&pdev->dev,
+ "Failed to configure the device mode. Fallback to host mode policy.\n");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ /* Nothing to config, exit */
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (!has_mem_bar) {
+ dev_err(&pdev->dev,
+ "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
+
+ if (!ena_dev->mem_bar)
+ return -EFAULT;
+
+ return 0;
}
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
@@ -3098,18 +3209,20 @@ err_rss_init:
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
- int release_bars;
-
- if (ena_dev->mem_bar)
- devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+ int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
- if (ena_dev->reg_bar)
- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
-
- release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
}
+static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+ llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+ llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+ llq_config->llq_ring_entry_size_value = 128;
+}
+
static int ena_calc_queue_size(struct pci_dev *pdev,
struct ena_com_dev *ena_dev,
u16 *max_tx_sgl_size,
@@ -3125,7 +3238,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
queue_size = min_t(u32, queue_size,
- get_feat_ctx->max_queues.max_llq_depth);
+ get_feat_ctx->max_queues.max_legacy_llq_depth);
queue_size = rounddown_pow_of_two(queue_size);
@@ -3158,7 +3271,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int version_printed;
struct net_device *netdev;
struct ena_adapter *adapter;
+ struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
+ char *queue_type_str;
static int adapters_found;
int io_queue_num, bars, rc;
int queue_size;
@@ -3212,16 +3327,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_region;
}
- ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
+ set_default_llq_configurations(&llq_config);
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev, ENA_MEM_BAR),
- pci_resource_len(pdev, ENA_MEM_BAR));
- if (!ena_dev->mem_bar) {
- rc = -EFAULT;
- goto err_device_destroy;
- }
+ rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
+ &llq_config);
+ if (rc) {
+ dev_err(&pdev->dev, "ena device init failed\n");
+ goto err_device_destroy;
}
/* initial Tx interrupt delay, Assumes 1 usec granularity.
@@ -3236,8 +3348,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
- io_queue_num, queue_size);
+ dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
+ io_queue_num, queue_size,
+ (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
+ "ENABLED" : "DISABLED");
/* dev zeroed in init_etherdev */
netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
@@ -3327,9 +3441,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ queue_type_str = "Regular";
+ else
+ queue_type_str = "Low Latency";
+
+ dev_info(&pdev->dev,
+ "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, io_queue_num);
+ netdev->dev_addr, io_queue_num, queue_type_str);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 7c7ae56c52cf..521873642339 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -43,9 +43,9 @@
#include "ena_com.h"
#include "ena_eth_com.h"
-#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 5
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_VER_MAJOR 2
+#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_SUBMINOR 1
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
@@ -61,6 +61,17 @@
#define ENA_ADMIN_MSIX_VEC 1
#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
+/* The ENA buffer length field is 16 bits long, so when PAGE_SIZE == 64kB the
+ * driver would pass 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
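[Editor's illustration] The cap exists because a 64 KiB page does not fit in the 16-bit length field: the truncated encoding would be 0. A two-line demonstration of the truncation and the clamp:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page = 64 * 1024;
	uint16_t encoded = (uint16_t)page;               /* wraps to 0 */
	uint32_t capped = page > 16 * 1024 ? 16 * 1024 : page;

	printf("encoded=%u capped=%u\n", encoded, capped);
	return 0;
}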
#define ENA_MIN_MSIX_VEC 2
#define ENA_REG_BAR 0
@@ -70,7 +81,7 @@
#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
-#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN)
+#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
/* limit the buffer size to 600 bytes to handle MTU changes from very
* small to very large, in which case the number of buffers per packet
@@ -95,10 +106,11 @@
*/
#define ENA_TX_POLL_BUDGET_DIVIDER 4
-/* Refill Rx queue when number of available descriptors is below
- * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER
+/* Refill the Rx queue when the number of required descriptors exceeds
+ * min(QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER, ENA_RX_REFILL_THRESH_PACKET)
*/
#define ENA_RX_REFILL_THRESH_DIVIDER 8
+#define ENA_RX_REFILL_THRESH_PACKET 256
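[Editor's illustration] With the packet cap in place, a 4096-entry ring no longer waits for 512 free descriptors before refilling; the threshold is clamped to 256. A quick check of the min() the driver now applies:

#include <stdio.h>

#define RX_REFILL_THRESH_DIVIDER 8
#define RX_REFILL_THRESH_PACKET  256
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int ring_size = 4096;
	/* large rings would otherwise defer refill far too long */
	int threshold = MIN(ring_size / RX_REFILL_THRESH_DIVIDER,
			    RX_REFILL_THRESH_PACKET);

	printf("threshold=%d\n", threshold);   /* 256, not 512 */
	return 0;
}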
/* Number of queues to check for missing queues per timer service */
#define ENA_MONITORED_TX_QUEUES 4
@@ -151,6 +163,9 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
+ /* Indicate if bufs[0] map the linear data of the skb. */
+ u8 map_linear_data;
+
/* Used for detect missing tx packets to limit the number of prints */
u32 print_once;
/* Save the last jiffies to detect missing tx packets
@@ -186,6 +201,7 @@ struct ena_stats_tx {
u64 tx_poll;
u64 doorbells;
u64 bad_req_id;
+ u64 llq_buffer_copy;
u64 missed_tx;
};
@@ -201,6 +217,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt;
u64 bad_req_id;
u64 empty_rx_ring;
+ u64 csum_unchecked;
};
struct ena_ring {
@@ -257,6 +274,8 @@ struct ena_ring {
struct ena_stats_tx tx_stats;
struct ena_stats_rx rx_stats;
};
+
+ u8 *push_buf_intermediate_buf;
int empty_rx_queue;
} ____cacheline_aligned;
@@ -355,15 +374,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_get_sset_count(struct net_device *netdev, int sset);
-/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
- * driver passas 0.
- * Since the max packet size the ENA handles is ~9kB limit the buffer length to
- * 16kB.
- */
-#if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
-#else
-#define ENA_PAGE_SIZE PAGE_SIZE
-#endif
-
#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 48ca97fbe7bc..04fcafcc059c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -33,137 +33,125 @@
#define _ENA_REGS_H_
enum ena_regs_reset_reason_types {
- ENA_REGS_RESET_NORMAL = 0,
-
- ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
-
- ENA_REGS_RESET_ADMIN_TO = 2,
-
- ENA_REGS_RESET_MISS_TX_CMPL = 3,
-
- ENA_REGS_RESET_INV_RX_REQ_ID = 4,
-
- ENA_REGS_RESET_INV_TX_REQ_ID = 5,
-
- ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
-
- ENA_REGS_RESET_INIT_ERR = 7,
-
- ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
-
- ENA_REGS_RESET_OS_TRIGGER = 9,
-
- ENA_REGS_RESET_OS_NETDEV_WD = 10,
-
- ENA_REGS_RESET_SHUTDOWN = 11,
-
- ENA_REGS_RESET_USER_TRIGGER = 12,
-
- ENA_REGS_RESET_GENERIC = 13,
-
- ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_NORMAL = 0,
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+ ENA_REGS_RESET_ADMIN_TO = 2,
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+ ENA_REGS_RESET_INIT_ERR = 7,
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+ ENA_REGS_RESET_SHUTDOWN = 11,
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+ ENA_REGS_RESET_GENERIC = 13,
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
};
/* ena_registers offsets */
-#define ENA_REGS_VERSION_OFF 0x0
-#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
-#define ENA_REGS_CAPS_OFF 0x8
-#define ENA_REGS_CAPS_EXT_OFF 0xc
-#define ENA_REGS_AQ_BASE_LO_OFF 0x10
-#define ENA_REGS_AQ_BASE_HI_OFF 0x14
-#define ENA_REGS_AQ_CAPS_OFF 0x18
-#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
-#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
-#define ENA_REGS_ACQ_CAPS_OFF 0x28
-#define ENA_REGS_AQ_DB_OFF 0x2c
-#define ENA_REGS_ACQ_TAIL_OFF 0x30
-#define ENA_REGS_AENQ_CAPS_OFF 0x34
-#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
-#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
-#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
-#define ENA_REGS_AENQ_TAIL_OFF 0x44
-#define ENA_REGS_INTR_MASK_OFF 0x4c
-#define ENA_REGS_DEV_CTL_OFF 0x54
-#define ENA_REGS_DEV_STS_OFF 0x58
-#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
-#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
-#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* 0 base */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
/* version register */
-#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
-#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
-#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
/* controller_version register */
-#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
-#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
-#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
-#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
-#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
-#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
-#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
/* caps register */
-#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
-#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
-#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
-#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
-#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
-#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
-#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
-#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
-#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
/* acq_caps register */
-#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
-#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
/* aenq_caps register */
-#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
-#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
/* dev_ctl register */
-#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
-#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
-#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
-#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
-#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
-#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
-#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
-#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
-#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
-#define ENA_REGS_DEV_STS_READY_MASK 0x1
-#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
-#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
-#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
-#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
-#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
-#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
-#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
-#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
-#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
-#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
/* mmio_reg_read register */
-#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
-#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
-#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
/* rss_ind_entry_update register */
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
#endif /*_ENA_REGS_H_ */
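The SHIFT/MASK pairs above are consumed with the usual mask-then-shift idiom; a stand-alone sketch decoding a raw version register value (the register value itself is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define ENA_REGS_VERSION_MINOR_VERSION_MASK	0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT	8
#define ENA_REGS_VERSION_MAJOR_VERSION_MASK	0xff00

int main(void)
{
	uint32_t ver = 0x0203;	/* hypothetical register readout */

	printf("ENA device version %u.%u\n",
	       (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
	       ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);	/* 2.3 */
	return 0;
}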
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 750007513f9d..1d5d6b8df855 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -84,7 +84,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
const struct aq_hw_ops **ops,
const struct aq_hw_caps_s **caps)
{
- int i = 0;
+ int i;
if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
return -EINVAL;
@@ -107,7 +107,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
int aq_pci_func_init(struct pci_dev *pdev)
{
- int err = 0;
+ int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
@@ -141,7 +141,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
- int err = 0;
+ int err;
if (pdev->msix_enabled || pdev->msi_enabled)
err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
@@ -164,7 +164,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
- unsigned int i = 0U;
+ unsigned int i;
for (i = 32U; i--;) {
if (!((1U << i) & self->msix_entry_mask))
@@ -194,8 +194,8 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
- struct aq_nic_s *self = NULL;
- int err = 0;
+ struct aq_nic_s *self;
+ int err;
struct net_device *ndev;
resource_size_t mmio_pa;
u32 bar;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e2d92548226a..de987ccdcf1f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -111,6 +111,7 @@ enum board_idx {
BCM57452,
BCM57454,
BCM5745x_NPAR,
+ BCM57508,
BCM58802,
BCM58804,
BCM58808,
@@ -152,6 +153,7 @@ static const struct {
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
+ [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -196,6 +198,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -241,15 +244,46 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-#define BNXT_CP_DB_REARM(db, raw_cons) \
- writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
-
-#define BNXT_CP_DB(db, raw_cons) \
- writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
-
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
+#define BNXT_DB_CQ(db, idx) \
+ writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_NQ_P5(db, idx) \
+ writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_CQ_ARM(db, idx) \
+ writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_NQ_ARM_P5(db, idx) \
+ writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
+
+static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ BNXT_DB_NQ_P5(db, idx);
+ else
+ BNXT_DB_CQ(db, idx);
+}
+
+static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ BNXT_DB_NQ_ARM_P5(db, idx);
+ else
+ BNXT_DB_CQ_ARM(db, idx);
+}
+
+static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
+ db->doorbell);
+ else
+ BNXT_DB_CQ(db, idx);
+}
+
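The helpers above centralize the legacy/P5 doorbell split behind one call site. A stand-alone sketch of the same dispatch shape (the constants and the printf stand-ins for writel()/writeq() are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct db_info {
	uint64_t db_key64;	/* pre-built key for 64-bit doorbells */
};

/* Sketch: P5 chips take a 64-bit key|index write, older chips a
 * 32-bit flags|index write to the ring's doorbell register.
 */
static void db_ring(const struct db_info *db, bool chip_p5, uint32_t idx)
{
	if (chip_p5)
		printf("writeq(0x%llx)\n",
		       (unsigned long long)(db->db_key64 | idx));
	else
		printf("writel(0x%x)\n", 0x10000000u | idx); /* placeholder flags */
}

int main(void)
{
	struct db_info db = { .db_key64 = 0x1234ULL << 32 };

	db_ring(&db, true, 7);
	db_ring(&db, false, 7);
	return 0;
}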
const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
@@ -341,6 +375,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
+ void __iomem *db = txr->tx_db.doorbell;
void *pdata = tx_push_buf->data;
u64 *end;
int j, push_len;
@@ -398,12 +433,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
- __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
- __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+ __iowrite64_copy(db, tx_push_buf, 16);
+ __iowrite32_copy(db + 4, tx_push_buf + 1,
(push_len - 16) << 1);
} else {
- __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
- push_len);
+ __iowrite64_copy(db, tx_push_buf, push_len);
}
goto tx_done;
@@ -505,7 +539,7 @@ normal_tx:
txr->tx_prod = prod;
if (!skb->xmit_more || netif_xmit_stopped(txq))
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+ bnxt_db_write(bp, &txr->tx_db, prod);
tx_done:
@@ -513,7 +547,7 @@ tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (skb->xmit_more && !tx_buf->is_push)
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+ bnxt_db_write(bp, &txr->tx_db, prod);
netif_tx_stop_queue(txq);
@@ -776,11 +810,11 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return 0;
}
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
u32 agg_bufs)
{
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
@@ -903,12 +937,13 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
struct sk_buff *skb, u16 cp_cons,
u32 agg_bufs)
{
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u32 i;
@@ -955,7 +990,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
* allocated already.
*/
rxr->rx_agg_prod = prod;
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
return NULL;
}
@@ -1012,10 +1047,9 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
return skb;
}
-static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 *raw_cons, void *cmp)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct rx_cmp *rxcmp = cmp;
u32 tmp_raw_cons = *raw_cons;
u8 cmp_type, agg_bufs = 0;
@@ -1141,11 +1175,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons_rx_buf->data = NULL;
}
-static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
- u16 cp_cons, u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
+ u32 agg_bufs)
{
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
@@ -1339,13 +1373,13 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
}
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
- struct bnxt_napi *bnapi,
+ struct bnxt_cp_ring_info *cpr,
u32 *raw_cons,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data_ptr, agg_bufs;
@@ -1357,7 +1391,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
void *data;
if (unlikely(bnapi->in_reset)) {
- int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+ int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
if (rc < 0)
return ERR_PTR(-EBUSY);
@@ -1383,7 +1417,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
if (agg_bufs > MAX_SKB_FRAGS)
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1393,7 +1427,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
} else {
@@ -1402,7 +1436,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
@@ -1417,7 +1451,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
kfree(data);
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1425,7 +1459,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
@@ -1479,10 +1513,10 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
* -ENOMEM - packet aborted due to out of memory
* -EIO - packet aborted due to hw error indicated in BD
*/
-static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
- u8 *event)
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u32 *raw_cons, u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
struct net_device *dev = bp->dev;
struct rx_cmp *rxcmp;
@@ -1521,7 +1555,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
goto next_rx_no_prod_no_len;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
- skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+ skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
(struct rx_tpa_end_cmp_ext *)rxcmp1, event);
@@ -1542,7 +1576,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
data = rx_buf->data;
data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
- int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+ int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
bnxt_sched_reset(bp, rxr);
return rc1;
@@ -1565,7 +1599,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -EIO;
goto next_rx;
@@ -1602,7 +1636,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1664,10 +1698,10 @@ next_rx_no_prod_no_len:
/* In netpoll mode, if we are using a combined completion ring, we need to
* discard the rx packets and recycle the buffers.
*/
-static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_force_rx_discard(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
u32 *raw_cons, u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 tmp_raw_cons = *raw_cons;
struct rx_cmp_ext *rxcmp1;
struct rx_cmp *rxcmp;
@@ -1697,7 +1731,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
tpa_end1->rx_tpa_end_cmp_errors_v2 |=
cpu_to_le32(RX_TPA_END_CMP_ERRORS);
}
- return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+ return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
#define BNXT_GET_EVENT_PORT(data) \
@@ -1848,7 +1882,7 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
}
/* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+ BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -1858,9 +1892,10 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int budget)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
u32 cons;
int tx_pkts = 0;
@@ -1868,6 +1903,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
u8 event = 0;
struct tx_cmp *txcmp;
+ cpr->has_more_work = 0;
while (1) {
int rc;
@@ -1881,19 +1917,22 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
* reading any further.
*/
dma_rmb();
+ cpr->had_work_done = 1;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (budget)
+ cpr->has_more_work = 1;
break;
}
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
if (likely(budget))
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
- rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+ rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
if (likely(rc >= 0))
rx_pkts += rc;
@@ -1916,39 +1955,60 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts && rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget) {
+ cpr->has_more_work = 1;
break;
+ }
}
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- void __iomem *db = txr->tx_doorbell;
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
+ bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
}
cpr->cp_raw_cons = raw_cons;
- /* ACK completion ring before freeing tx ring and producing new
- * buffers in rx/agg rings to prevent overflowing the completion
- * ring.
- */
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnapi->tx_pkts += tx_pkts;
+ bnapi->events |= event;
+ return rx_pkts;
+}
- if (tx_pkts)
- bnapi->tx_int(bp, bnapi, tx_pkts);
+static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ if (bnapi->tx_pkts) {
+ bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
+ bnapi->tx_pkts = 0;
+ }
- if (event & BNXT_RX_EVENT) {
+ if (bnapi->events & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
- if (event & BNXT_AGG_EVENT)
- bnxt_db_write(bp, rxr->rx_agg_doorbell,
- DB_KEY_RX | rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bnapi->events & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
}
+ bnapi->events = 0;
+}
+
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int budget)
+{
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ int rx_pkts;
+
+ rx_pkts = __bnxt_poll_work(bp, cpr, budget);
+
+ /* ACK completion ring before freeing tx ring and producing new
+ * buffers in rx/agg rings to prevent overflowing the completion
+ * ring.
+ */
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ __bnxt_poll_work_done(bp, bnapi);
return rx_pkts;
}
@@ -1987,7 +2047,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
if (likely(rc == -EIO) && budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
@@ -2006,16 +2066,15 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
}
cpr->cp_raw_cons = raw_cons;
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
+ BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (event & BNXT_AGG_EVENT)
- bnxt_db_write(bp, rxr->rx_agg_doorbell,
- DB_KEY_RX | rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
}
return rx_pkts;
}
@@ -2028,19 +2087,17 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
int work_done = 0;
while (1) {
- work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+ work_done += bnxt_poll_work(bp, cpr, budget - work_done);
if (work_done >= budget) {
if (!budget)
- BNXT_CP_DB_REARM(cpr->cp_doorbell,
- cpr->cp_raw_cons);
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
break;
}
if (!bnxt_has_work(bp, cpr)) {
if (napi_complete_done(napi, work_done))
- BNXT_CP_DB_REARM(cpr->cp_doorbell,
- cpr->cp_raw_cons);
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
break;
}
}
@@ -2057,6 +2114,104 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int i, work_done = 0;
+
+ for (i = 0; i < 2; i++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+
+ if (cpr2) {
+ work_done += __bnxt_poll_work(bp, cpr2,
+ budget - work_done);
+ cpr->has_more_work |= cpr2->has_more_work;
+ }
+ }
+ return work_done;
+}
+
+static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u64 dbr_type, bool all)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ struct bnxt_db_info *db;
+
+ if (cpr2 && (all || cpr2->had_work_done)) {
+ db = &cpr2->cp_db;
+ writeq(db->db_key64 | dbr_type |
+ RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ cpr2->had_work_done = 0;
+ }
+ }
+ __bnxt_poll_work_done(bp, bnapi);
+}
+
+static int bnxt_poll_p5(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 raw_cons = cpr->cp_raw_cons;
+ struct bnxt *bp = bnapi->bp;
+ struct nqe_cn *nqcmp;
+ int work_done = 0;
+ u32 cons;
+
+ if (cpr->has_more_work) {
+ cpr->has_more_work = 0;
+ work_done = __bnxt_poll_cqs(bp, bnapi, budget);
+ if (cpr->has_more_work) {
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
+ return work_done;
+ }
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
+ if (napi_complete_done(napi, work_done))
+ BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
+ return work_done;
+ }
+ while (1) {
+ cons = RING_CMP(raw_cons);
+ nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
+ false);
+ cpr->cp_raw_cons = raw_cons;
+ if (napi_complete_done(napi, work_done))
+ BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
+ cpr->cp_raw_cons);
+ return work_done;
+ }
+
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+
+ if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
+ u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ struct bnxt_cp_ring_info *cpr2;
+
+ cpr2 = cpr->cp_ring_arr[idx];
+ work_done += __bnxt_poll_work(bp, cpr2,
+ budget - work_done);
+ cpr->has_more_work = cpr2->has_more_work;
+ } else {
+ bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (cpr->has_more_work)
+ break;
+ }
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
+ cpr->cp_raw_cons = raw_cons;
+ return work_done;
+}
+
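In bnxt_poll_p5() above, each notification queue entry carries cq_handle_low, which selects the RX or TX completion sub-ring to poll. A stand-alone sketch of that dispatch (types and counts are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

enum { RX_HDL = 0, TX_HDL = 1 };

struct cq { const char *name; int pending; };

/* One NQ entry names the completion ring that has work; poll it
 * up to the remaining NAPI budget.
 */
static int poll_one_nq_entry(struct cq *cq_arr[2], uint32_t cq_handle,
			     int budget)
{
	struct cq *cq = cq_arr[cq_handle];
	int done = cq->pending < budget ? cq->pending : budget;

	cq->pending -= done;
	printf("polled %d pkts from %s\n", done, cq->name);
	return done;
}

int main(void)
{
	struct cq rx = { "rx-cq", 10 }, tx = { "tx-cq", 3 };
	struct cq *arr[2] = { [RX_HDL] = &rx, [TX_HDL] = &tx };

	poll_one_nq_entry(arr, RX_HDL, 64);
	poll_one_nq_entry(arr, TX_HDL, 64);
	return 0;
}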
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
@@ -2202,60 +2357,73 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
-static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
int i;
- for (i = 0; i < ring->nr_pages; i++) {
- if (!ring->pg_arr[i])
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
continue;
- dma_free_coherent(&pdev->dev, ring->page_size,
- ring->pg_arr[i], ring->dma_arr[i]);
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
- ring->pg_arr[i] = NULL;
+ rmem->pg_arr[i] = NULL;
}
- if (ring->pg_tbl) {
- dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
- ring->pg_tbl, ring->pg_tbl_map);
- ring->pg_tbl = NULL;
+ if (rmem->pg_tbl) {
+ dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+ rmem->pg_tbl, rmem->pg_tbl_map);
+ rmem->pg_tbl = NULL;
}
- if (ring->vmem_size && *ring->vmem) {
- vfree(*ring->vmem);
- *ring->vmem = NULL;
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
}
}
-static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
- int i;
struct pci_dev *pdev = bp->pdev;
+ u64 valid_bit = 0;
+ int i;
- if (ring->nr_pages > 1) {
- ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
- ring->nr_pages * 8,
- &ring->pg_tbl_map,
+ if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+ if (rmem->nr_pages > 1) {
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
+ rmem->nr_pages * 8,
+ &rmem->pg_tbl_map,
GFP_KERNEL);
- if (!ring->pg_tbl)
+ if (!rmem->pg_tbl)
return -ENOMEM;
}
- for (i = 0; i < ring->nr_pages; i++) {
- ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
- ring->page_size,
- &ring->dma_arr[i],
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
GFP_KERNEL);
- if (!ring->pg_arr[i])
+ if (!rmem->pg_arr[i])
return -ENOMEM;
- if (ring->nr_pages > 1)
- ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+ if (rmem->nr_pages > 1) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
}
- if (ring->vmem_size) {
- *ring->vmem = vzalloc(ring->vmem_size);
- if (!(*ring->vmem))
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
return -ENOMEM;
}
return 0;
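bnxt_alloc_ring() above tags the next-to-last and last page-table entries of a multi-page ring when BNXT_RMEM_RING_PTE_FLAG is set. A stand-alone sketch of that tagging, with placeholder bit values rather than the hardware's:

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID		(1ULL << 0)	/* placeholder bit values */
#define PTE_NEXT_TO_LAST	(1ULL << 1)
#define PTE_LAST		(1ULL << 2)

static void fill_pg_tbl(uint64_t *tbl, const uint64_t *dma, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		uint64_t bits = PTE_VALID;

		if (i == n - 2)
			bits |= PTE_NEXT_TO_LAST;
		else if (i == n - 1)
			bits |= PTE_LAST;
		tbl[i] = dma[i] | bits;
	}
}

int main(void)
{
	uint64_t dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	uint64_t tbl[4];
	int i;

	fill_pg_tbl(tbl, dma, 4);
	for (i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i, (unsigned long long)tbl[i]);
	return 0;
}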
@@ -2285,10 +2453,10 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
rxr->rx_agg_bmap = NULL;
ring = &rxr->rx_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
ring = &rxr->rx_agg_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
}
}
@@ -2315,15 +2483,16 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (rc < 0)
return rc;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
+ ring->grp_idx = i;
if (agg_rings) {
u16 mem_size;
ring = &rxr->rx_agg_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2366,7 +2535,7 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
ring = &txr->tx_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
}
}
@@ -2397,7 +2566,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
ring = &txr->tx_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2443,6 +2612,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_ring_struct *ring;
+ int j;
if (!bnapi)
continue;
@@ -2450,12 +2620,51 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+
+ if (cpr2) {
+ ring = &cpr2->cp_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+ kfree(cpr2);
+ cpr->cp_ring_arr[j] = NULL;
+ }
+ }
+ }
+}
+
+static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
+ if (!cpr)
+ return NULL;
+
+ ring = &cpr->cp_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->cp_desc_ring;
+ rmem->dma_arr = cpr->cp_desc_mapping;
+ rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
+ rc = bnxt_alloc_ring(bp, rmem);
+ if (rc) {
+ bnxt_free_ring(bp, rmem);
+ kfree(cpr);
+ cpr = NULL;
}
+ return cpr;
}
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
+ bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, rc, ulp_base_vec, ulp_msix;
ulp_msix = bnxt_get_ulp_msix_num(bp);
@@ -2469,9 +2678,10 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
continue;
cpr = &bnapi->cp_ring;
+ cpr->bnapi = bnapi;
ring = &cpr->cp_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2479,6 +2689,29 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
ring->map_idx = i + ulp_msix;
else
ring->map_idx = i;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+
+ if (i < bp->rx_nr_rings) {
+ struct bnxt_cp_ring_info *cpr2 =
+ bnxt_alloc_cp_sub_ring(bp);
+
+ cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
+ if (!cpr2)
+ return -ENOMEM;
+ cpr2->bnapi = bnapi;
+ }
+ if ((sh && i < bp->tx_nr_rings) ||
+ (!sh && i >= bp->rx_nr_rings)) {
+ struct bnxt_cp_ring_info *cpr2 =
+ bnxt_alloc_cp_sub_ring(bp);
+
+ cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
+ if (!cpr2)
+ return -ENOMEM;
+ cpr2->bnapi = bnapi;
+ }
}
return 0;
}
@@ -2489,6 +2722,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_ring_mem_info *rmem;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
@@ -2499,31 +2733,34 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
- ring->nr_pages = bp->cp_nr_pages;
- ring->page_size = HW_CMPD_RING_SIZE;
- ring->pg_arr = (void **)cpr->cp_desc_ring;
- ring->dma_arr = cpr->cp_desc_mapping;
- ring->vmem_size = 0;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->cp_desc_ring;
+ rmem->dma_arr = cpr->cp_desc_mapping;
+ rmem->vmem_size = 0;
rxr = bnapi->rx_ring;
if (!rxr)
goto skip_rx;
ring = &rxr->rx_ring_struct;
- ring->nr_pages = bp->rx_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)rxr->rx_desc_ring;
- ring->dma_arr = rxr->rx_desc_mapping;
- ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
- ring->vmem = (void **)&rxr->rx_buf_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
ring = &rxr->rx_agg_ring_struct;
- ring->nr_pages = bp->rx_agg_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
- ring->dma_arr = rxr->rx_agg_desc_mapping;
- ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
- ring->vmem = (void **)&rxr->rx_agg_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
txr = bnapi->tx_ring;
@@ -2531,12 +2768,13 @@ skip_rx:
continue;
ring = &txr->tx_ring_struct;
- ring->nr_pages = bp->tx_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)txr->tx_desc_ring;
- ring->dma_arr = txr->tx_desc_mapping;
- ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
- ring->vmem = (void **)&txr->tx_buf_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
}
}
@@ -2546,8 +2784,8 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
u32 prod;
struct rx_bd **rx_buf_ring;
- rx_buf_ring = (struct rx_bd **)ring->pg_arr;
- for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+ rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
int j;
struct rx_bd *rxbd;
@@ -2649,7 +2887,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
static void bnxt_init_cp_rings(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
@@ -2658,6 +2896,17 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
ring->fw_ring_id = INVALID_HW_RING_ID;
cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+
+ if (!cpr2)
+ continue;
+
+ ring = &cpr2->cp_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
+ }
}
}
@@ -2761,10 +3010,12 @@ static void bnxt_init_vnics(struct bnxt *bp)
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+ for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
@@ -2978,6 +3229,9 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
}
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto vnic_skip_grps;
+
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
max_rings = bp->rx_nr_rings;
else
@@ -2988,7 +3242,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
rc = -ENOMEM;
goto out;
}
-
+vnic_skip_grps:
if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
@@ -3042,7 +3296,7 @@ static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
if (bp->hwrm_short_cmd_req_addr) {
struct pci_dev *pdev = bp->pdev;
- dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
bp->hwrm_short_cmd_req_addr,
bp->hwrm_short_cmd_req_dma_addr);
bp->hwrm_short_cmd_req_addr = NULL;
@@ -3054,7 +3308,7 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
struct pci_dev *pdev = bp->pdev;
bp->hwrm_short_cmd_req_addr =
- dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
&bp->hwrm_short_cmd_req_dma_addr,
GFP_KERNEL);
if (!bp->hwrm_short_cmd_req_addr)
@@ -3078,6 +3332,13 @@ static void bnxt_free_stats(struct bnxt *bp)
bp->hw_rx_port_stats = NULL;
}
+ if (bp->hw_tx_port_stats_ext) {
+ dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
+ bp->hw_tx_port_stats_ext,
+ bp->hw_tx_port_stats_ext_map);
+ bp->hw_tx_port_stats_ext = NULL;
+ }
+
if (bp->hw_rx_port_stats_ext) {
dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
bp->hw_rx_port_stats_ext,
@@ -3152,6 +3413,13 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (!bp->hw_rx_port_stats_ext)
return 0;
+ if (bp->hwrm_spec_code >= 0x10902) {
+ bp->hw_tx_port_stats_ext =
+ dma_zalloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
+ }
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
return 0;
@@ -3290,6 +3558,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_cp_ring_info *cpr =
+ &bp->bnapi[i]->cp_ring;
+
+ cpr->cp_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ }
}
bp->rx_ring = kcalloc(bp->rx_nr_rings,
@@ -3299,7 +3574,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
return -ENOMEM;
for (i = 0; i < bp->rx_nr_rings; i++) {
- bp->rx_ring[i].bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ }
+ rxr->bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
}
@@ -3321,12 +3604,16 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
j = bp->rx_nr_rings;
for (i = 0; i < bp->tx_nr_rings; i++, j++) {
- bp->tx_ring[i].bnapi = bp->bnapi[j];
- bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ txr->tx_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ txr->bnapi = bp->bnapi[j];
+ bp->bnapi[j]->tx_ring = txr;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) {
- bp->tx_ring[i].txq_index = i -
- bp->tx_nr_rings_xdp;
+ txr->txq_index = i - bp->tx_nr_rings_xdp;
bp->bnapi[j]->tx_int = bnxt_tx_int;
} else {
bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
@@ -3386,7 +3673,7 @@ static void bnxt_disable_int(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID)
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
}
}
@@ -3422,7 +3709,7 @@ static void bnxt_enable_int(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
}
}
@@ -3455,12 +3742,27 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
- if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
+ if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
+ if (msg_len > bp->hwrm_max_ext_req_len ||
+ !bp->hwrm_short_cmd_req_addr)
+ return -EINVAL;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+ u16 max_msg_len;
+
+ /* Set boundary for maximum extended request length for short
+ * cmd format. If passed up from the device, use the max supported
+ * internal req length.
+ */
+ max_msg_len = bp->hwrm_max_ext_req_len;
memcpy(short_cmd_req, req, msg_len);
- memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
- msg_len);
+ if (msg_len < max_msg_len)
+ memset(short_cmd_req + msg_len, 0,
+ max_msg_len - msg_len);
short_input.req_type = req->req_type;
short_input.signature =
@@ -3989,13 +4291,48 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info;
+
+ grp_info = &bp->grp_info[ring->grp_idx];
+ return grp_info->cp_fw_ring_id;
+}
+
+static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
+ return cpr->cp_ring_struct.fw_ring_id;
+ } else {
+ return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
+ }
+}
+
+static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = txr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
+ return cpr->cp_ring_struct.fw_ring_id;
+ } else {
+ return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
+ }
+}
+
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
u32 i, j, max_rings;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input req = {0};
- if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+ vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
@@ -4026,6 +4363,51 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
+ struct hwrm_vnic_rss_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss) {
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return 0;
+ }
+ req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ for (i = 0, k = 0; i < nr_ctxs; i++) {
+ __le16 *ring_tbl = vnic->rss_table;
+ int rc;
+
+ req.ring_table_pair_index = i;
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ for (j = 0; j < 64; j++) {
+ u16 ring_id;
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ rxr++;
+ k++;
+ if (k == max_rings) {
+ k = 0;
+ rxr = &bp->rx_ring[0];
+ }
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -EIO;
+ }
+ return 0;
+}
+
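The loop in bnxt_hwrm_vnic_set_rss_p5() above writes 64 ring-id pairs per RSS context and wraps back to rx ring 0 once all rings have been used. A stand-alone sketch of just the wrap-around fill (ids here are plain ring indices):

#include <stdint.h>
#include <stdio.h>

/* Fill one 64-entry indirection context from nr_rings rx rings,
 * wrapping as the driver does; k carries the position across contexts.
 */
static void fill_rss_ctx(uint16_t *tbl, int nr_rings, int *k)
{
	int j;

	for (j = 0; j < 64; j++) {
		tbl[j] = (uint16_t)*k;
		if (++(*k) == nr_rings)
			*k = 0;
	}
}

int main(void)
{
	uint16_t tbl[64];
	int k = 0;

	fill_rss_ctx(tbl, 12, &k);	/* 12 rx rings, hypothetical */
	printf("tbl[0]=%u tbl[11]=%u tbl[12]=%u tbl[63]=%u\n",
	       tbl[0], tbl[11], tbl[12], tbl[63]);	/* 0 11 0 3 */
	return 0;
}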
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -4109,6 +4491,18 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
+
+ req.default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req.default_cmpl_ring_id =
+ cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
+ req.enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ goto vnic_mru;
+ }
req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
/* Only RSS support for now TBD: COS & LB */
if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
@@ -4141,13 +4535,13 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
- req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
-
req.lb_rule = cpu_to_le16(0xffff);
+vnic_mru:
req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN);
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp))
def_vlan = bp->vf.vlan;
@@ -4195,6 +4589,10 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
struct hwrm_vnic_alloc_input req = {0};
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto vnic_no_ring_grps;
/* map ring groups to this vnic */
for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
@@ -4204,12 +4602,12 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
j, nr_rings);
break;
}
- bp->vnic_info[vnic_id].fw_grp_ids[j] =
- bp->grp_info[grp_idx].fw_grp_id;
+ vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
}
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+vnic_no_ring_grps:
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
if (vnic_id == 0)
req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
@@ -4218,7 +4616,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
- bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -4238,7 +4636,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
- if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
@@ -4253,6 +4652,9 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
u16 i;
u32 rc = 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->rx_nr_rings; i++) {
struct hwrm_ring_grp_alloc_input req = {0};
@@ -4285,7 +4687,7 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
u32 rc = 0;
struct hwrm_ring_grp_free_input req = {0};
- if (!bp->grp_info)
+ if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@ -4314,45 +4716,90 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
int rc = 0, err = 0;
struct hwrm_ring_alloc_input req = {0};
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
struct bnxt_ring_grp_info *grp_info;
u16 ring_id;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0;
- if (ring->nr_pages > 1) {
- req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+ if (rmem->nr_pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
/* Page size is in log2 units */
req.page_size = BNXT_PAGE_SHIFT;
req.page_tbl_depth = 1;
} else {
- req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+ req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
}
req.fbo = 0;
/* Association of ring index with doorbell index and MSIX number */
req.logical_id = cpu_to_le16(map_index);
switch (ring_type) {
- case HWRM_RING_ALLOC_TX:
+ case HWRM_RING_ALLOC_TX: {
+ struct bnxt_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnxt_tx_ring_info,
+ tx_ring_struct);
req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
/* Association of transmit ring with completion ring */
grp_info = &bp->grp_info[ring->grp_idx];
- req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
req.length = cpu_to_le32(bp->tx_ring_mask + 1);
req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req.queue_id = cpu_to_le16(ring->queue_id);
break;
+ }
case HWRM_RING_ALLOC_RX:
req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u16 flags = 0;
+
+ /* Association of rx ring with stats context */
+ grp_info = &bp->grp_info[ring->grp_idx];
+ req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req.flags = cpu_to_le16(flags);
+ }
break;
case HWRM_RING_ALLOC_AGG:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bp->grp_info[ring->grp_idx];
+ req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ } else {
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ }
req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
break;
case HWRM_RING_ALLOC_CMPL:
req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* Association of cp ring with nq */
+ grp_info = &bp->grp_info[map_index];
+ req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req.cq_handle = cpu_to_le64(ring->handle);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ }
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req.length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
@@ -4401,22 +4848,67 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
return rc;
}
+static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
+ u32 map_idx, u32 xid)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ db->doorbell = bp->bar1 + 0x10000;
+ else
+ db->doorbell = bp->bar1 + 0x4000;
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= (u64)xid << DBR_XID_SFT;
+ } else {
+ db->doorbell = bp->bar1 + map_idx * 0x80;
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key32 = DB_KEY_TX;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key32 = DB_KEY_RX;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key32 = DB_KEY_CP;
+ break;
+ }
+ }
+}
+
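bnxt_set_db() above picks the doorbell address by chip generation: P5 functions get one fixed BAR1 offset (0x10000 for the PF, 0x4000 for a VF) with the ring identified by the xid baked into db_key64, while legacy chips keep one 0x80-wide doorbell slot per ring. A sketch of the address selection (only the offsets are taken from the diff; the rest is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t db_offset(bool chip_p5, bool is_pf, uint32_t map_idx)
{
	if (chip_p5)
		return is_pf ? 0x10000 : 0x4000; /* fixed per-function window */
	return (uint64_t)map_idx * 0x80;	 /* one slot per ring */
}

int main(void)
{
	printf("P5 PF: 0x%llx\n", (unsigned long long)db_offset(true, true, 0));
	printf("P5 VF: 0x%llx\n", (unsigned long long)db_offset(true, false, 0));
	printf("legacy ring 5: 0x%llx\n",
	       (unsigned long long)db_offset(false, true, 5));
	return 0;
}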
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
+ u32 type;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = HWRM_RING_ALLOC_NQ;
+ else
+ type = HWRM_RING_ALLOC_CMPL;
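+ /* On P5 chips each MSI-X vector gets an NQ; the L2 completion rings are allocated separately below and chained to it. */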
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
u32 map_idx = ring->map_idx;
- cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
if (!i) {
@@ -4426,33 +4918,69 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
+ type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 map_idx = i;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
- map_idx);
+ struct bnxt_ring_struct *ring;
+ u32 map_idx;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = txr->bnapi;
+ struct bnxt_cp_ring_info *cpr, *cpr2;
+ u32 type2 = HWRM_RING_ALLOC_CMPL;
+
+ cpr = &bnapi->cp_ring;
+ cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
+ ring = &cpr2->cp_ring_struct;
+ ring->handle = BNXT_TX_HDL;
+ map_idx = bnapi->index;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (rc)
+ goto err_out;
+ bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
+ }
+ ring = &txr->tx_ring_struct;
+ map_idx = i;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
+ bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
+ type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 map_idx = rxr->bnapi->index;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 type2 = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_cp_ring_info *cpr2;
+
+ cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
+ ring = &cpr2->cp_ring_struct;
+ ring->handle = BNXT_RX_HDL;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (rc)
+ goto err_out;
+ bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
+ }
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring =
@@ -4460,15 +4988,13 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
u32 grp_idx = ring->grp_idx;
u32 map_idx = grp_idx + bp->rx_nr_rings;
- rc = hwrm_ring_alloc_send_msg(bp, ring,
- HWRM_RING_ALLOC_AGG,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
- writel(DB_KEY_RX | rxr->rx_agg_prod,
- rxr->rx_agg_doorbell);
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
@@ -4504,6 +5030,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
+ u32 type;
int i;
if (!bp->bnapi)
@@ -4512,9 +5039,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 grp_idx = txr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_TX,
@@ -4528,8 +5055,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
@@ -4541,15 +5069,19 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+ else
+ type = RING_FREE_REQ_RING_TYPE_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
+ hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -4564,14 +5096,32 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/
bnxt_disable_int_sync(bp);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = RING_FREE_REQ_RING_TYPE_NQ;
+ else
+ type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ struct bnxt_ring_struct *ring;
+ int j;
+
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ if (cpr2) {
+ ring = &cpr2->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ continue;
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+ ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ hwrm_ring_free_send_msg(bp, ring, type,
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -4579,6 +5129,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -4609,6 +5162,22 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
cp = min_t(u16, cp, stats);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ int rx = hw_resc->resv_rx_rings;
+ int tx = hw_resc->resv_tx_rings;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx >>= 1;
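+ /* With aggregation enabled, each RX ring consumes two HW rings, so halve before trimming and re-double after. */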
+ if (cp < (rx + tx)) {
+ bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ cp = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ }
hw_resc->resv_cp_rings = cp;
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -4634,6 +5203,8 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
+static bool bnxt_rfs_supported(struct bnxt *bp);
+
static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
int tx_rings, int rx_rings, int ring_grps,
@@ -4647,15 +5218,38 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_tx_rings = cpu_to_le16(tx_rings);
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= tx_rings + ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= rx_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ }
enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
+ req->num_msix = cpu_to_le16(cp_rings);
+ req->num_rsscos_ctxs =
+ cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ } else {
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(1);
+ if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ bnxt_rfs_supported(bp))
+ req->num_rsscos_ctxs =
+ cpu_to_le16(ring_grps + 1);
+ }
req->num_stat_ctxs = req->num_cmpl_rings;
req->num_vnics = cpu_to_le16(vnics);
}
@@ -4672,16 +5266,33 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= tx_rings + ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
+ req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ } else {
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ }
req->num_stat_ctxs = req->num_cmpl_rings;
req->num_vnics = cpu_to_le16(vnics);
@@ -4725,10 +5336,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
- req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
- req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
- req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -4774,20 +5381,19 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
return true;
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
+ hw_resc->resv_vnics != vnic ||
+ (hw_resc->resv_hw_ring_grps != grp &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
return false;
}
-static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
- bool shared);
-
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -4803,7 +5409,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -4866,9 +5472,11 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
- FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
- FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -4887,12 +5495,16 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- if (BNXT_NEW_RM(bp))
+ if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
- FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ else
+ flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
+ }
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -4915,46 +5527,140 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
cp_rings, vnics);
}
-static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
+{
+ struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ struct hwrm_ring_aggint_qcaps_input req = {0};
+ int rc;
+
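+ /* Fallback values match the legacy chips: 80 ns timer units and 6-bit DMA aggregation fields (max 63). */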
+ coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
+ coal_cap->num_cmpl_dma_aggr_max = 63;
+ coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
+ coal_cap->cmpl_aggr_dma_tmr_max = 65535;
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
+ coal_cap->int_lat_tmr_min_max = 65535;
+ coal_cap->int_lat_tmr_max_max = 65535;
+ coal_cap->num_cmpl_aggr_int_max = 65535;
+ coal_cap->timer_units = 80;
+
+ if (bp->hwrm_spec_code < 0x10902)
+ return;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
+ coal_cap->nq_params = le32_to_cpu(resp->nq_params);
+ coal_cap->num_cmpl_dma_aggr_max =
+ le16_to_cpu(resp->num_cmpl_dma_aggr_max);
+ coal_cap->num_cmpl_dma_aggr_during_int_max =
+ le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
+ coal_cap->cmpl_aggr_dma_tmr_max =
+ le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max =
+ le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
+ coal_cap->int_lat_tmr_min_max =
+ le16_to_cpu(resp->int_lat_tmr_min_max);
+ coal_cap->int_lat_tmr_max_max =
+ le16_to_cpu(resp->int_lat_tmr_max_max);
+ coal_cap->num_cmpl_aggr_int_max =
+ le16_to_cpu(resp->num_cmpl_aggr_int_max);
+ coal_cap->timer_units = le16_to_cpu(resp->timer_units);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
+{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+
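+ /* timer_units is nanoseconds per HW timer tick, so convert usecs to ticks. */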
+ return usec * 1000 / coal_cap->timer_units;
+}
+
+static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
+ struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
- u16 val, tmr, max, flags;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u32 cmpl_params = coal_cap->cmpl_params;
+ u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
max = hw_coal->bufs_per_record * hw_coal->budget;
+ max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
req->num_cmpl_aggr_int = cpu_to_le16(val);
- /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
- val = min_t(u16, val, 63);
+ val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
req->num_cmpl_dma_aggr = cpu_to_le16(val);
- /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
- val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
+ val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
+ coal_cap->num_cmpl_dma_aggr_during_int_max);
req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
- tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
- tmr = max_t(u16, tmr, 1);
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
+ tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
req->int_lat_tmr_max = cpu_to_le16(tmr);
/* min timer set to 1/2 of interrupt timer */
- val = tmr / 2;
- req->int_lat_tmr_min = cpu_to_le16(val);
+ if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
+ val = tmr / 2;
+ val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
+ req->int_lat_tmr_min = cpu_to_le16(val);
+ req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+ }
/* buf timer set to 1/4 of interrupt timer */
- val = max_t(u16, tmr / 4, 1);
+ val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
- tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
- tmr = max_t(u16, tmr, 1);
- req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+ if (cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
+ val = clamp_t(u16, tmr, 1,
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max);
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
+ req->enables |=
+ cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
+ }
- flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
- if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
+ if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
+ hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
req->flags = cpu_to_le16(flags);
+ req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
+}
+
+/* Caller holds bp->hwrm_cmd_lock */
+static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct bnxt_coal *hw_coal)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u32 nq_params = coal_cap->nq_params;
+ u16 tmr;
+
+ if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ -1, -1);
+ req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
+ req.flags =
+ cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
+
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
+ tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
+ req.int_lat_tmr_min = cpu_to_le16(tmr);
+ req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+ return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
@@ -4962,7 +5668,6 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_coal coal;
- unsigned int grp_idx;
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -4978,10 +5683,9 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- bnxt_hwrm_set_coal_params(&coal, &req_rx);
+ bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
- grp_idx = bnapi->index;
- req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+ req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
HWRM_CMD_TIMEOUT);
@@ -4998,22 +5702,46 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
- bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);
+ bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
+ bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_coal *hw_coal;
+ u16 ring_id;
req = &req_rx;
- if (!bnapi->rx_ring)
+ if (!bnapi->rx_ring) {
+ ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
req = &req_tx;
- req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ } else {
+ ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
+ }
+ req->ring_id = cpu_to_le16(ring_id);
rc = _hwrm_send_message(bp, req, sizeof(*req),
HWRM_CMD_TIMEOUT);
if (rc)
break;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+
+ if (bnapi->rx_ring && bnapi->tx_ring) {
+ req = &req_tx;
+ ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
+ req->ring_id = cpu_to_le16(ring_id);
+ rc = _hwrm_send_message(bp, req, sizeof(*req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ if (bnapi->rx_ring)
+ hw_coal = &bp->rx_coal;
+ else
+ hw_coal = &bp->tx_coal;
+ __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5140,6 +5868,304 @@ func_qcfg_exit:
return rc;
}
+static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_input req = {0};
+ struct hwrm_func_backing_store_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ int i;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ ctx_pg = kcalloc(bp->max_q + 1, sizeof(*ctx_pg), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
+ ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
+ ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
+ ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
+ ctx->vnic_max_vnic_entries =
+ le16_to_cpu(resp->vnic_max_vnic_entries);
+ ctx->vnic_max_ring_table_entries =
+ le16_to_cpu(resp->vnic_max_ring_table_entries);
+ ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
+ ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
+ ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctx->tqm_min_entries_per_ring =
+ le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctx->tqm_max_entries_per_ring =
+ le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+ if (!ctx->tqm_entries_multiple)
+ ctx->tqm_entries_multiple = 1;
+ ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ } else {
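+ /* Command not supported by older firmware; proceed without host backing store. */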
+ rc = 0;
+ }
+ctx_err:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ u8 pg_size = 0;
+
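+ /* Upper nibble of pg_attr encodes the page size (0 = 4K, 1 = 8K, 2 = 64K); bit 0 is set when a page directory (indirect level) is used. */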
+ if (BNXT_PAGE_SHIFT == 13)
+ pg_size = 1 << 4;
+ else if (BNXT_PAGE_SHIFT == 16)
+ pg_size = 2 << 4;
+
+ *pg_attr = pg_size;
+ if (rmem->nr_pages > 1) {
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
+{
+ struct hwrm_func_backing_store_cfg_input req = {0};
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ __le32 *num_entries;
+ __le64 *pg_dir;
+ u8 *pg_attr;
+ int i, rc;
+ u32 ena;
+
+ if (!ctx)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
+ req.enables = cpu_to_le32(enables);
+
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
+ ctx_pg = &ctx->qp_mem;
+ req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
+ req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
+ req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.qpc_pg_size_qpc_lvl,
+ &req.qpc_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
+ ctx_pg = &ctx->srq_mem;
+ req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
+ req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.srq_pg_size_srq_lvl,
+ &req.srq_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
+ ctx_pg = &ctx->cq_mem;
+ req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
+ req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
+ &req.cq_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
+ ctx_pg = &ctx->vnic_mem;
+ req.vnic_num_vnic_entries =
+ cpu_to_le16(ctx->vnic_max_vnic_entries);
+ req.vnic_num_ring_table_entries =
+ cpu_to_le16(ctx->vnic_max_ring_table_entries);
+ req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.vnic_pg_size_vnic_lvl,
+ &req.vnic_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
+ ctx_pg = &ctx->stat_mem;
+ req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
+ req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.stat_pg_size_stat_lvl,
+ &req.stat_page_dir);
+ }
+ for (i = 0, num_entries = &req.tqm_sp_num_entries,
+ pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
+ pg_dir = &req.tqm_sp_page_dir,
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+ i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ if (!(enables & ena))
+ continue;
+
+ req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
+ ctx_pg = ctx->tqm_mem[i];
+ *num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
+static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (!mem_size)
+ return 0;
+
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
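+ /* Each context block is limited to one page of PTEs: MAX_CTX_PAGES = BNXT_PAGE_SIZE / 8. */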
+ if (rmem->nr_pages > MAX_CTX_PAGES) {
+ rmem->nr_pages = 0;
+ return -EINVAL;
+ }
+ rmem->page_size = BNXT_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+ return bnxt_alloc_ring(bp, rmem);
+}
+
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ int i;
+
+ if (!ctx)
+ return;
+
+ if (ctx->tqm_mem[0]) {
+ for (i = 0; i < bp->max_q + 1; i++)
+ bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
+ kfree(ctx->tqm_mem[0]);
+ ctx->tqm_mem[0] = NULL;
+ }
+
+ bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
+ ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+}
+
+static int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ u32 mem_size, ena, entries;
+ int i, rc;
+
+ rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
+ rc);
+ return rc;
+ }
+ ctx = bp->ctx;
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
+ ctx_pg = &ctx->qp_mem;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->srq_mem;
+ ctx_pg->entries = ctx->srq_max_l2_entries;
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->cq_mem;
+ ctx_pg->entries = ctx->cq_max_l2_entries;
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->vnic_mem;
+ ctx_pg->entries = ctx->vnic_max_vnic_entries +
+ ctx->vnic_max_ring_table_entries;
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->stat_mem;
+ ctx_pg->entries = ctx->stat_max_entries;
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ entries = ctx->qp_max_l2_entries;
+ entries = roundup(entries, ctx->tqm_entries_multiple);
+ entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
+ ctx->tqm_max_entries_per_ring);
+ for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
+ ctx_pg = ctx->tqm_mem[i];
+ ctx_pg->entries = entries;
+ mem_size = ctx->tqm_entry_size * entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ }
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+ if (rc)
+ netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
+ rc);
+ else
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+ return 0;
+}
+
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -5178,6 +6204,13 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u16 max_msix = le16_to_cpu(resp->max_msix);
+
+ hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+ }
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -5267,6 +6300,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
return rc;
if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_alloc_ctx_mem(bp);
+ if (rc)
+ return rc;
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
@@ -5311,13 +6347,15 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
qptr = &resp->queue_id0;
for (i = 0, j = 0; i < bp->max_tc; i++) {
- bp->q_info[j].queue_id = *qptr++;
+ bp->q_info[j].queue_id = *qptr;
+ bp->q_ids[i] = *qptr++;
bp->q_info[j].queue_profile = *qptr++;
bp->tc_to_qidx[j] = j;
if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
(no_rdma && BNXT_PF(bp)))
j++;
}
+ bp->max_q = bp->max_tc;
bp->max_tc = max_t(u8, j, 1);
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
@@ -5367,8 +6405,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
- if (resp->hwrm_intf_maj_8b >= 1)
+ if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+ }
+ if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
bp->chip_num = le16_to_cpu(resp->chip_num);
if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
@@ -5425,8 +6467,10 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
{
+ struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
return 0;
@@ -5435,7 +6479,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+ req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
+ bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+ } else {
+ bp->fw_rx_stats_ext_size = 0;
+ bp->fw_tx_stats_ext_size = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
@@ -5540,7 +6596,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
@@ -5596,6 +6652,53 @@ vnic_setup_err:
return rc;
}
+static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
+{
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
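+ /* Each RSS context maps up to 64 RX rings on P5, so allocate enough contexts to cover them all. */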
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
+ vnic_id, i, rc);
+ break;
+ }
+ bp->rsscos_nr_ctxs++;
+ }
+ if (i < nr_ctxs)
+ return -ENOMEM;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic_id, rc);
+ return rc;
+ }
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+ vnic_id, rc);
+ }
+ }
+ return rc;
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return __bnxt_setup_vnic_p5(bp, vnic_id);
+ else
+ return __bnxt_setup_vnic(bp, vnic_id);
+}
+
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6214,12 +7317,15 @@ static void bnxt_init_napi(struct bnxt *bp)
struct bnxt_napi *bnapi;
if (bp->flags & BNXT_FLAG_USING_MSIX) {
- if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
@@ -6976,10 +8082,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
netdev_err(bp->dev, "Failed to reserve default rings at open\n");
return rc;
}
- rc = bnxt_reserve_rings(bp);
- if (rc)
- return rc;
}
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_USING_MSIX)) {
/* disable RFS if falling back to INTA */
@@ -7451,6 +8557,8 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -7464,6 +8572,8 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return false;
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false;
@@ -7984,6 +9094,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
INIT_WORK(&bp->sp_task, bnxt_sp_task);
spin_lock_init(&bp->ntp_fltr_lock);
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bp->db_lock);
+#endif
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
@@ -8549,6 +9662,9 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
bnxt_cleanup_pci(bp);
free_netdev(dev);
}
@@ -8854,6 +9970,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bnxt_set_max_func_irqs(bp, max_irqs);
if (bnxt_vf_pciid(ent->driver_data))
bp->flags |= BNXT_FLAG_VF;
@@ -8880,12 +9997,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
if (rc)
goto init_err_pci_clean;
}
+ if (BNXT_CHIP_P5(bp))
+ bp->flags |= BNXT_FLAG_CHIP_P5;
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
goto init_err_pci_clean;
@@ -8900,7 +10021,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_GRO;
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_LRO;
dev->hw_enc_features =
@@ -8914,7 +10035,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
if (dev->features & NETIF_F_GRO_HW)
@@ -8925,10 +10046,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&bp->sriov_cfg_wait);
mutex_init(&bp->sriov_lock);
#endif
- bp->gro_func = bnxt_gro_func_5730x;
- if (BNXT_CHIP_P4_PLUS(bp))
- bp->gro_func = bnxt_gro_func_5731x;
- else
+ if (BNXT_SUPPORTS_TPA(bp)) {
+ bp->gro_func = bnxt_gro_func_5730x;
+ if (BNXT_CHIP_P4(bp))
+ bp->gro_func = bnxt_gro_func_5731x;
+ }
+ if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
rc = bnxt_hwrm_func_drv_rgtr(bp);
@@ -8941,6 +10064,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->ulp_probe = bnxt_ulp_probe;
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err_pci_clean;
+ }
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
@@ -8955,13 +10085,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -EADDRNOTAVAIL;
goto init_err_pci_clean;
}
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -8979,7 +10102,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
- bnxt_set_max_func_irqs(bp, max_irqs);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
@@ -8992,7 +10114,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -9027,6 +10149,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+ bnxt_hwrm_coal_params_qcaps(bp);
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -9059,6 +10183,9 @@ init_err_cleanup_tc:
init_err_pci_clean:
bnxt_free_hwrm_resources(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
bnxt_cleanup_pci(bp);
init_err_free:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bde384630a75..0fe57e36912b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.2"
+#define DRV_MODULE_VERSION "1.10.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 9
-#define DRV_VER_UPD 2
+#define DRV_VER_MIN 10
+#define DRV_VER_UPD 0
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -403,6 +403,19 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+struct nqe_cn {
+ __le16 type;
+ #define NQ_CN_TYPE_MASK 0x3fUL
+ #define NQ_CN_TYPE_SFT 0
+ #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_CN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -416,6 +429,25 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+/* 64-bit doorbell */
+#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_XID_MASK 0x000fffff00000000ULL
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_RQ (0x1ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_SRQ_ARM (0x3ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMSE (0x5ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_CQ_ARMENA (0x7ULL << 60)
+#define DBR_TYPE_SRQ_ARMENA (0x8ULL << 60)
+#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NULL (0xfULL << 60)
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -505,6 +537,9 @@ struct rx_tpa_end_cmp_ext {
(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
!((raw_cons) & bp->cp_bit))
+#define NQ_CMP_VALID(nqcmp, raw_cons) \
+ (!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit))
+
#define TX_CMP_TYPE(txcmp) \
(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
@@ -577,9 +612,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
-struct bnxt_ring_struct {
+struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
+ u32 flags;
+#define BNXT_RMEM_VALID_PTE_FLAG 1
+#define BNXT_RMEM_RING_PTE_FLAG 2
+
void **pg_arr;
dma_addr_t *dma_arr;
@@ -588,12 +627,17 @@ struct bnxt_ring_struct {
int vmem_size;
void **vmem;
+};
+
+struct bnxt_ring_struct {
+ struct bnxt_ring_mem_info ring_mem;
u16 fw_ring_id; /* Ring id filled by Chimp FW */
union {
u16 grp_idx;
u16 map_idx; /* Used by cmpl rings */
};
+ u32 handle;
u8 queue_id;
};
@@ -609,12 +653,20 @@ struct tx_push_buffer {
u32 data[25];
};
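+/* Doorbell address plus pre-computed key: 64-bit key on P5 chips, 32-bit on older ones. */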
+struct bnxt_db_info {
+ void __iomem *doorbell;
+ union {
+ u64 db_key64;
+ u32 db_key32;
+ };
+};
+
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
- void __iomem *tx_doorbell;
+ struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
struct bnxt_sw_tx_bd *tx_buf_ring;
@@ -631,6 +683,42 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+#define BNXT_LEGACY_COAL_CMPL_PARAMS \
+ (RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT)
+
+#define BNXT_COAL_CMPL_ENABLES \
+ (RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT)
+
+#define BNXT_COAL_CMPL_MIN_TMR_ENABLE \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN
+
+#define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT
+
+struct bnxt_coal_cap {
+ u32 cmpl_params;
+ u32 nq_params;
+ u16 num_cmpl_dma_aggr_max;
+ u16 num_cmpl_dma_aggr_during_int_max;
+ u16 cmpl_aggr_dma_tmr_max;
+ u16 cmpl_aggr_dma_tmr_during_int_max;
+ u16 int_lat_tmr_min_max;
+ u16 int_lat_tmr_max_max;
+ u16 num_cmpl_aggr_int_max;
+ u16 timer_units;
+};
+
struct bnxt_coal {
u16 coal_ticks;
u16 coal_ticks_irq;
@@ -675,8 +763,8 @@ struct bnxt_rx_ring_info {
u16 rx_agg_prod;
u16 rx_sw_agg_prod;
u16 rx_next_cons;
- void __iomem *rx_doorbell;
- void __iomem *rx_agg_doorbell;
+ struct bnxt_db_info rx_db;
+ struct bnxt_db_info rx_agg_db;
struct bpf_prog *xdp_prog;
@@ -703,8 +791,12 @@ struct bnxt_rx_ring_info {
};
struct bnxt_cp_ring_info {
+ struct bnxt_napi *bnapi;
u32 cp_raw_cons;
- void __iomem *cp_doorbell;
+ struct bnxt_db_info cp_db;
+
+ u8 had_work_done:1;
+ u8 has_more_work:1;
struct bnxt_coal rx_ring_coal;
u64 rx_packets;
@@ -713,7 +805,10 @@ struct bnxt_cp_ring_info {
struct net_dim dim;
- struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+ union {
+ struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+ struct nqe_cn *nq_desc_ring[MAX_CP_PAGES];
+ };
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -723,6 +818,10 @@ struct bnxt_cp_ring_info {
u64 rx_l4_csum_errors;
struct bnxt_ring_struct cp_ring_struct;
+
+ struct bnxt_cp_ring_info *cp_ring_arr[2];
+#define BNXT_RX_HDL 0
+#define BNXT_TX_HDL 1
};
struct bnxt_napi {
@@ -736,6 +835,9 @@ struct bnxt_napi {
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
int);
+ int tx_pkts;
+ u8 events;
+
u32 flags;
#define BNXT_NAPI_FLAG_XDP 0x1
@@ -755,6 +857,7 @@ struct bnxt_irq {
#define HWRM_RING_ALLOC_RX 0x2
#define HWRM_RING_ALLOC_AGG 0x4
#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
#define INVALID_STATS_CTX_ID -1
@@ -768,7 +871,7 @@ struct bnxt_ring_grp_info {
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
-#define BNXT_MAX_CTX_PER_VNIC 2
+#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
@@ -1069,6 +1172,55 @@ struct bnxt_vf_rep {
struct bnxt_vf_rep_stats tx_stats;
};
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
+
+struct bnxt_ctx_pg_info {
+ u32 entries;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnxt_ring_mem_info ring_mem;
+};
+
+struct bnxt_ctx_mem_info {
+ u32 qp_max_entries;
+ u16 qp_min_qp1_entries;
+ u16 qp_max_l2_entries;
+ u16 qp_entry_size;
+ u16 srq_max_l2_entries;
+ u32 srq_max_entries;
+ u16 srq_entry_size;
+ u16 cq_max_l2_entries;
+ u32 cq_max_entries;
+ u16 cq_entry_size;
+ u16 vnic_max_vnic_entries;
+ u16 vnic_max_ring_table_entries;
+ u16 vnic_entry_size;
+ u32 stat_max_entries;
+ u16 stat_entry_size;
+ u16 tqm_entry_size;
+ u32 tqm_min_entries_per_ring;
+ u32 tqm_max_entries_per_ring;
+ u32 mrav_max_entries;
+ u16 mrav_entry_size;
+ u16 tim_entry_size;
+ u32 tim_max_entries;
+ u8 tqm_entries_multiple;
+
+ u32 flags;
+ #define BNXT_CTX_FLAG_INITED 0x01
+
+ struct bnxt_ctx_pg_info qp_mem;
+ struct bnxt_ctx_pg_info srq_mem;
+ struct bnxt_ctx_pg_info cq_mem;
+ struct bnxt_ctx_pg_info vnic_mem;
+ struct bnxt_ctx_pg_info stat_mem;
+ struct bnxt_ctx_pg_info *tqm_mem[9];
+};
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1098,6 +1250,8 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
+#define CHIP_NUM_57500 0x1750
+
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
@@ -1144,6 +1298,7 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
+ #define BNXT_FLAG_CHIP_P5 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1190,15 +1345,24 @@ struct bnxt {
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
+ !((bp)->flags & BNXT_FLAG_CHIP_P5))
-/* Chip class phase 4 and later */
-#define BNXT_CHIP_P4_PLUS(bp) \
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(bp) \
+ ((bp)->chip_num == CHIP_NUM_57500)
+
+/* Chip class phase 4.x */
+#define BNXT_CHIP_P4(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
BNXT_CHIP_NUM_588XX((bp)->chip_num) || \
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+#define BNXT_CHIP_P4_PLUS(bp) \
+ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
struct bnxt_en_dev *edev;
struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
@@ -1261,6 +1425,8 @@ struct bnxt {
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
u8 tc_to_qidx[BNXT_MAX_QUEUE];
+ u8 q_ids[BNXT_MAX_QUEUE];
+ u8 max_q;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -1305,12 +1471,17 @@ struct bnxt {
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
+ struct tx_port_stats_ext *hw_tx_port_stats_ext;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
+ dma_addr_t hw_tx_port_stats_ext_map;
int hw_port_stats_size;
+ u16 fw_rx_stats_ext_size;
+ u16 fw_tx_stats_ext_size;
u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
@@ -1328,11 +1499,10 @@ struct bnxt {
u8 port_count;
u16 br_mode;
+ struct bnxt_coal_cap coal_cap;
struct bnxt_coal rx_coal;
struct bnxt_coal tx_coal;
-#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
-
u32 stats_coal_ticks;
#define BNXT_DEF_STATS_COAL_TICKS 1000000
#define BNXT_MIN_STATS_COAL_TICKS 250000
@@ -1360,6 +1530,7 @@ struct bnxt {
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
+ struct bnxt_ctx_mem_info *ctx;
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
struct bnxt_vf_info vf;
@@ -1374,6 +1545,11 @@ struct bnxt {
struct mutex sriov_lock;
#endif
+#if BITS_PER_LONG == 32
+ /* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+ spinlock_t db_lock;
+#endif
+
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
@@ -1425,6 +1601,9 @@ struct bnxt {
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
+#define BNXT_TX_STATS_EXT_OFFSET(counter) \
+ (offsetof(struct tx_port_stats_ext, counter) / 8)
+
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c
@@ -1443,21 +1622,46 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+#if BITS_PER_LONG == 32
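+/* 32-bit kernels have no atomic 64-bit MMIO write; emulate writeq with two 32-bit writes under db_lock (expects "bp" in scope). */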
+#define writeq(val64, db) \
+do { \
+ spin_lock(&bp->db_lock); \
+ writel((val64) & 0xffffffff, db); \
+ writel((val64) >> 32, (db) + 4); \
+ spin_unlock(&bp->db_lock); \
+} while (0)
+
+#define writeq_relaxed writeq
+#endif
+
/* For TX and RX ring doorbells with no ordering guarantee */
-static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db,
- u32 val)
+static inline void bnxt_db_write_relaxed(struct bnxt *bp,
+ struct bnxt_db_info *db, u32 idx)
{
- writel_relaxed(val, db);
- if (bp->flags & BNXT_FLAG_DOUBLE_DB)
- writel_relaxed(val, db);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ writeq_relaxed(db->db_key64 | idx, db->doorbell);
+ } else {
+ u32 db_val = db->db_key32 | idx;
+
+ writel_relaxed(db_val, db->doorbell);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel_relaxed(db_val, db->doorbell);
+ }
}
/* For TX and RX ring doorbells */
-static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
+static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
+ u32 idx)
{
- writel(val, db);
- if (bp->flags & BNXT_FLAG_DOUBLE_DB)
- writel(val, db);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ writeq(db->db_key64 | idx, db->doorbell);
+ } else {
+ u32 db_val = db->db_key32 | idx;
+
+ writel(db_val, db->doorbell);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel(db_val, db->doorbell);
+ }
}
extern const u16 bnxt_lhint_arr[];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index e52d7af3ab3e..48078564f025 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -148,6 +148,65 @@ reset_coalesce:
#define BNXT_RX_STATS_EXT_ENTRY(counter) \
{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+#define BNXT_TX_STATS_EXT_ENTRY(counter) \
+ { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+
+#define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
+ BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
+
+#define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
+ BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
+ BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
+
+#define BNXT_RX_STATS_EXT_PFC_ENTRIES \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(7)
+
+#define BNXT_TX_STATS_EXT_PFC_ENTRIES \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(7)
+
+#define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
+ BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
+
+#define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
+ BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
+ BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
+
+#define BNXT_RX_STATS_EXT_COS_ENTRIES \
+ BNXT_RX_STATS_EXT_COS_ENTRY(0), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(1), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(2), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(3), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(4), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(5), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(6), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(7) \
+
+#define BNXT_TX_STATS_EXT_COS_ENTRIES \
+ BNXT_TX_STATS_EXT_COS_ENTRY(0), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(1), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(2), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(3), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(4), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(5), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(6), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+
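For reference, each of the helper macros above expands to a pair of { offset, name } initializers; for example BNXT_RX_STATS_EXT_PFC_ENTRY(0) becomes:

	{ BNXT_RX_STATS_EXT_OFFSET(pfc_pri0_rx_duration_us), "pfc_pri0_rx_duration_us" },
	{ BNXT_RX_STATS_EXT_OFFSET(pfc_pri0_rx_transitions), "pfc_pri0_rx_transitions" }

and the OFFSET macros divide the structure byte offset by 8, giving an index into the __le64 counter array that bnxt_get_ethtool_stats() walks below.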
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -256,11 +315,20 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
+ BNXT_RX_STATS_EXT_COS_ENTRIES,
+ BNXT_RX_STATS_EXT_PFC_ENTRIES,
+};
+
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_tx_port_stats_ext_arr[] = {
+ BNXT_TX_STATS_EXT_COS_ENTRIES,
+ BNXT_TX_STATS_EXT_PFC_ENTRIES,
};
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
-#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -272,7 +340,8 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_PORT_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
- num_stats += BNXT_NUM_PORT_STATS_EXT;
+ num_stats += bp->fw_rx_stats_ext_size +
+ bp->fw_tx_stats_ext_size;
return num_stats;
}
@@ -334,12 +403,17 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- __le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+ __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+ __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
- for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
- buf[j] = le64_to_cpu(*(port_stats_ext +
+ for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
+ buf[j] = le64_to_cpu(*(rx_port_stats_ext +
bnxt_port_stats_ext_arr[i].offset));
}
+ for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
+ buf[j] = le64_to_cpu(*(tx_port_stats_ext +
+ bnxt_tx_port_stats_ext_arr[i].offset));
+ }
}
}
@@ -407,10 +481,15 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
+ for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
strcpy(buf, bnxt_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
+ strcpy(buf,
+ bnxt_tx_port_stats_ext_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
}
break;
case ETH_SS_TEST:
@@ -2419,11 +2498,11 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 raw_cons, int pkt_size)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_rx_ring_info *rxr;
struct bnxt_sw_rx_bd *rx_buf;
struct rx_cmp *rxcmp;
u16 cp_cons, cons;
@@ -2431,6 +2510,7 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
u32 len;
int i;
+ rxr = bnapi->rx_ring;
cp_cons = RING_CMP(raw_cons);
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -2451,17 +2531,15 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
return 0;
}
-static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int pkt_size)
{
- struct bnxt_napi *bnapi = bp->bnapi[0];
- struct bnxt_cp_ring_info *cpr;
struct tx_cmp *txcmp;
int rc = -EIO;
u32 raw_cons;
u32 cons;
int i;
- cpr = &bnapi->cp_ring;
raw_cons = cpr->cp_raw_cons;
for (i = 0; i < 200; i++) {
cons = RING_CMP(raw_cons);
@@ -2477,7 +2555,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
*/
dma_rmb();
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
- rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+ rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
raw_cons = NEXT_RAW_CMP(raw_cons);
raw_cons = NEXT_RAW_CMP(raw_cons);
break;
@@ -2491,12 +2569,14 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
static int bnxt_run_loopback(struct bnxt *bp)
{
struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ struct bnxt_cp_ring_info *cpr;
int pkt_size, i = 0;
struct sk_buff *skb;
dma_addr_t map;
u8 *data;
int rc;
+ cpr = &txr->bnapi->cp_ring;
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
@@ -2520,8 +2600,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | txr->tx_prod);
- rc = bnxt_poll_loopback(bp, pkt_size);
+ bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
+ rc = bnxt_poll_loopback(bp, cpr, pkt_size);
dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 971ace5d0d4a..5dd086059568 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -37,6 +37,8 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_HWRM_REQUEST 0x1UL
#define TLV_TYPE_HWRM_RESPONSE 0x2UL
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
@@ -186,6 +188,7 @@ struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
#define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
#define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
#define HWRM_STAT_CTX_ALLOC 0xb0UL
#define HWRM_STAT_CTX_FREE 0xb1UL
#define HWRM_STAT_CTX_QUERY 0xb2UL
@@ -235,6 +238,7 @@ struct cmd_nums {
#define HWRM_CFA_PAIR_INFO 0x10fUL
#define HWRM_FW_IPC_MSG 0x110UL
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -295,6 +299,7 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
#define HWRM_DBG_FW_CLI 0xff1aUL
#define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -320,20 +325,21 @@ struct cmd_nums {
/* ret_codes (size:64b/8B) */
struct ret_codes {
__le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
__le16 unused_0[3];
};
@@ -355,10 +361,10 @@ struct hwrm_err_output {
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 25
-#define HWRM_VERSION_STR "1.9.2.25"
+#define HWRM_VERSION_MINOR 10
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 3
+#define HWRM_VERSION_STR "1.10.0.3"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -396,10 +402,15 @@ struct hwrm_ver_get_output {
u8 netctrl_fw_bld_8b;
u8 netctrl_fw_rsvd_8b;
__le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -528,6 +539,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
@@ -539,6 +551,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -652,10 +665,11 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
};
/* hwrm_func_reset_input (size:192b/24B) */
@@ -852,6 +866,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
#define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
#define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -903,6 +918,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1014,6 +1030,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1214,9 +1231,10 @@ struct hwrm_func_drv_rgtr_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1416,7 +1434,9 @@ struct hwrm_func_resource_qcaps_output {
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
__le16 max_tx_scheduler_inputs;
- u8 unused_0[7];
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ u8 unused_0[5];
u8 valid;
};
@@ -1445,7 +1465,9 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_stat_ctx;
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
- u8 unused_0[4];
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ u8 unused_0[2];
};
/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
@@ -1503,7 +1525,8 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_entry_size;
__le16 tim_entry_size;
__le32 tim_max_entries;
- u8 unused_0[3];
+ u8 unused_0[2];
+ u8 tqm_entries_multiple;
u8 valid;
};
@@ -1917,6 +1940,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
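(These force-link-speed values encode the speed in units of 100 Mb/s: 0x7d0 is 2000, i.e. 200 Gb/s, consistent with 0x3e8 = 1000 for 100 Gb/s; 0xffff is a sentinel for 10 Mb/s rather than an encoded speed.)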
u8 auto_mode;
@@ -1947,6 +1971,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -1964,6 +1989,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2048,6 +2074,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -2072,6 +2099,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2083,6 +2111,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2107,6 +2136,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2124,6 +2154,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2178,7 +2209,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -2644,7 +2679,8 @@ struct hwrm_port_qstats_ext_output {
__le16 tx_stat_size;
__le16 rx_stat_size;
__le16 total_active_cos_queues;
- u8 unused_0;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
u8 valid;
};
@@ -2685,7 +2721,9 @@ struct hwrm_port_clr_stats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
};
/* hwrm_port_clr_stats_output (size:128b/16B) */
@@ -4574,7 +4612,9 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
#define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 unused_0[3];
+ u8 unused_0;
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -4838,13 +4878,19 @@ struct hwrm_cfa_l2_filter_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
__le32 enables;
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
@@ -4901,6 +4947,8 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
@@ -4958,11 +5006,17 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
@@ -5064,6 +5118,8 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
@@ -5140,7 +5196,7 @@ struct hwrm_vxlan_ipv6_hdr {
__be32 dest_ip_addr[4];
};
-/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
struct hwrm_cfa_encap_data_vxlan {
u8 src_mac_addr[6];
__le16 unused_0;
@@ -5159,6 +5215,10 @@ struct hwrm_cfa_encap_data_vxlan {
__be16 src_port;
__be16 dst_port;
__be32 vni;
+ u8 hdr_rsvd0[3];
+ u8 hdr_rsvd1;
+ u8 hdr_flags;
+ u8 unused[3];
};
/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
@@ -5171,15 +5231,18 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE
u8 unused_0[3];
__le32 encap_data[20];
};
@@ -5273,6 +5336,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
@@ -5404,6 +5469,8 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
@@ -5476,19 +5543,22 @@ struct hwrm_cfa_flow_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
@@ -5502,6 +5572,7 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5525,21 +5596,38 @@ struct hwrm_cfa_flow_alloc_input {
__be16 nat_port;
__be16 l2_rewrite_smac[3];
u8 ip_proto;
- u8 unused_0;
-};
-
-/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+ u8 tunnel_type;
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
struct hwrm_cfa_flow_alloc_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 flow_handle;
- u8 unused_0[5];
+ u8 unused_0[2];
+ __le32 flow_id;
+ __le64 ext_flow_handle;
+ u8 unused_1[7];
u8 valid;
};
-/* hwrm_cfa_flow_free_input (size:192b/24B) */
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5548,6 +5636,7 @@ struct hwrm_cfa_flow_free_input {
__le64 resp_addr;
__le16 flow_handle;
u8 unused_0[6];
+ __le64 ext_flow_handle;
};
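The new ext_flow_handle ties in with the 64-bit flow-handle support added earlier in this header (VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED and FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE): a driver that registers in 64-bit mode identifies flows by ext_flow_handle and flow_id instead of the legacy 16-bit flow_handle, which is presumably why flow_free above and flow_stats below grow matching ID fields.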
/* hwrm_cfa_flow_free_output (size:256b/32B) */
@@ -5562,7 +5651,7 @@ struct hwrm_cfa_flow_free_output {
u8 valid;
};
-/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5581,6 +5670,16 @@ struct hwrm_cfa_flow_stats_input {
__le16 flow_handle_8;
__le16 flow_handle_9;
u8 unused_0[2];
+ __le32 flow_id_0;
+ __le32 flow_id_1;
+ __le32 flow_id_2;
+ __le32 flow_id_3;
+ __le32 flow_id_4;
+ __le32 flow_id_5;
+ __le32 flow_id_6;
+ __le32 flow_id_7;
+ __le32 flow_id_8;
+ __le32 flow_id_9;
};
/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
@@ -5670,7 +5769,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0[7];
};
@@ -5698,7 +5798,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5727,7 +5828,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -5932,10 +6034,11 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT
u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 host_idx;
u8 unused_0[5];
};
@@ -5947,10 +6050,11 @@ struct hwrm_fw_reset_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 unused_0[6];
u8 valid;
};
@@ -6498,6 +6602,34 @@ struct hwrm_dbg_coredump_retrieve_output {
u8 valid;
};
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
+ u8 unused_0[3];
+ __le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 producer_index;
+ __le32 consumer_index;
+ u8 unused_0[7];
+ u8 valid;
+};
+
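As a sketch of how a caller might drive the new debug query (hypothetical fragment; the request-header setup done by the driver's usual HWRM init helper is outside this hunk):

	struct hwrm_dbg_ring_info_get_input req = {0};

	req.ring_type = DBG_RING_INFO_GET_REQ_RING_TYPE_TX;
	req.fw_ring_id = cpu_to_le32(fw_ring_id);
	/* send via hwrm_send_message(); on success the response carries
	 * producer_index and consumer_index for the ring.
	 */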
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 0584d07c8c33..bf6de02be396 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -63,7 +63,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
tx_buf = &txr->tx_buf_ring[last_tx_cons];
rx_prod = tx_buf->rx_prod;
}
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rx_prod);
}
/* returns the following:
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index e2cdfa75673f..75c1c5ed2387 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,6 +67,7 @@ config CHELSIO_T3
config CHELSIO_T4
tristate "Chelsio Communications T4/T5/T6 Ethernet support"
depends on PCI && (IPV6 || IPV6=n)
+ depends on THERMAL || !THERMAL
select FW_LOADER
select MDIO
select ZLIB_DEFLATE
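The added 'depends on THERMAL || !THERMAL' line is the standard Kconfig idiom for excluding the one unbuildable combination, CHELSIO_T4=y with THERMAL=m: the expression is trivially satisfied when THERMAL is built-in or disabled, and when THERMAL=m it forces CHELSIO_T4 to m as well so the thermal symbols can be resolved at module link time.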
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index bea6a059a8f1..78e5d17a1d5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,3 +12,6 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
+ifdef CONFIG_THERMAL
+cxgb4-objs += cxgb4_thermal.o
+endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b5010bd32ea3..b16f4b3ef4c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -52,6 +52,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/crash_dump.h>
+#include <linux/thermal.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
@@ -890,6 +891,14 @@ struct mps_encap_entry {
atomic_t refcnt;
};
+#if IS_ENABLED(CONFIG_THERMAL)
+struct ch_thermal {
+ struct thermal_zone_device *tzdev;
+ int trip_temp;
+ int trip_type;
+};
+#endif
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -1008,6 +1017,9 @@ struct adapter {
/* Dump buffer for collecting logs in kdump kernel */
struct vmcoredd_data vmcoredd;
+#if IS_ENABLED(CONFIG_THERMAL)
+ struct ch_thermal ch_thermal;
+#endif
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1862,4 +1874,8 @@ void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
int cxgb4_dcb_enabled(const struct net_device *dev);
+
+int cxgb4_thermal_init(struct adapter *adap);
+int cxgb4_thermal_remove(struct adapter *adap);
+
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1a93efa60e71..2de0590a62c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5864,6 +5864,10 @@ fw_attach_fail:
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
+ if (IS_ENABLED(CONFIG_THERMAL) &&
+ !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
+ cxgb4_thermal_init(adapter);
+
print_adapter_info(adapter);
return 0;
@@ -5929,6 +5933,8 @@ static void remove_one(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
cxgb4_ptp_stop(adapter);
+ if (IS_ENABLED(CONFIG_THERMAL))
+ cxgb4_thermal_remove(adapter);
/* If we allocated filters, free up state associated with any
* valid filters ...
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
new file mode 100644
index 000000000000..28052e7504e5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Written by: Ganesh Goudar (ganeshgr@chelsio.com)
+ */
+
+#include "cxgb4.h"
+
+#define CXGB4_NUM_TRIPS 1
+
+static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
+ int *temp)
+{
+ struct adapter *adap = tzdev->devdata;
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
+
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0 || val == 0)
+ return -1;
+
+ *temp = val * 1000;
+ return 0;
+}
+
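A note on units (an inference from the x1000 scaling, not stated in this patch): the firmware appears to report the sensor value in whole degrees Celsius, and get_temp() multiplies by 1000 because the thermal core works in millidegrees, so a 55 C reading is returned as 55000.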
+static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev,
+ int trip, enum thermal_trip_type *type)
+{
+ struct adapter *adap = tzdev->devdata;
+
+ if (!adap->ch_thermal.trip_temp)
+ return -EINVAL;
+
+ *type = adap->ch_thermal.trip_type;
+ return 0;
+}
+
+static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
+ int trip, int *temp)
+{
+ struct adapter *adap = tzdev->devdata;
+
+ if (!adap->ch_thermal.trip_temp)
+ return -EINVAL;
+
+ *temp = adap->ch_thermal.trip_temp;
+ return 0;
+}
+
+static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+ .get_temp = cxgb4_thermal_get_temp,
+ .get_trip_type = cxgb4_thermal_get_trip_type,
+ .get_trip_temp = cxgb4_thermal_get_trip_temp,
+};
+
+int cxgb4_thermal_init(struct adapter *adap)
+{
+ struct ch_thermal *ch_thermal = &adap->ch_thermal;
+ int num_trip = CXGB4_NUM_TRIPS;
+ u32 param, val;
+ int ret;
+
+	/* On older firmware we may not get the trip temperature;
+	 * set the number of trips to 0 in that case.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_MAXTMPTHRESH));
+
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0) {
+ num_trip = 0; /* could not get trip temperature */
+ } else {
+ ch_thermal->trip_temp = val * 1000;
+ ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
+ }
+
+ ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip,
+ 0, adap,
+ &cxgb4_thermal_ops,
+ NULL, 0, 0);
+ if (IS_ERR(ch_thermal->tzdev)) {
+ ret = PTR_ERR(ch_thermal->tzdev);
+ dev_err(adap->pdev_dev, "Failed to register thermal zone\n");
+ ch_thermal->tzdev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+int cxgb4_thermal_remove(struct adapter *adap)
+{
+ if (adap->ch_thermal.tzdev)
+ thermal_zone_device_unregister(adap->ch_thermal.tzdev);
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 6d2bc8789223..57584ab32043 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1332,6 +1332,7 @@ enum fw_params_param_dev_phyfw {
enum fw_params_param_dev_diag {
FW_PARAM_DEV_DIAG_TMP = 0x00,
FW_PARAM_DEV_DIAG_VDD = 0x01,
+ FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02,
};
enum fw_params_param_dev_fwcache {
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index a7f365db0eed..809a155eb193 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -1,7 +1,6 @@
config FSL_DPAA2_ETH
tristate "Freescale DPAA2 Ethernet"
depends on FSL_MC_BUS && FSL_MC_DPIO
- depends on ARCH_LAYERSCAPE || COMPILE_TEST
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 156080d42a6c..88f7acce38dc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -98,8 +98,7 @@ free_buf:
}
/* Build a linear skb based on a single-buffer frame descriptor */
-static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
void *fd_vaddr)
{
@@ -233,7 +232,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
- skb = build_linear_skb(priv, ch, fd, vaddr);
+ skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
skb = build_frag_skb(priv, ch, buf_data);
skb_free_frag(vaddr);
@@ -438,7 +437,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_len(fd, skb->len);
- dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, sgt_buf);
@@ -491,7 +490,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_format(fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, buffer_start);
@@ -660,7 +659,7 @@ err_alloc_headroom:
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
struct napi_struct *napi __always_unused,
u16 queue_id __always_unused)
@@ -935,7 +934,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_channel *ch;
struct dpaa2_eth_priv *priv;
int rx_cleaned = 0, txconf_cleaned = 0;
- enum dpaa2_eth_fq_type type;
+ enum dpaa2_eth_fq_type type = 0;
int store_cleaned;
int err;
@@ -1002,7 +1001,7 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
static int link_state_update(struct dpaa2_eth_priv *priv)
{
- struct dpni_link_state state;
+ struct dpni_link_state state = {0};
int err;
err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
@@ -1085,8 +1084,7 @@ enable_err:
/* The DPIO store must be empty when we call this,
* at the end of every NAPI cycle.
*/
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch)
+static u32 drain_channel(struct dpaa2_eth_channel *ch)
{
u32 drained = 0, total = 0;
@@ -1107,7 +1105,7 @@ static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- drained += drain_channel(priv, ch);
+ drained += drain_channel(ch);
}
return drained;
@@ -1116,7 +1114,7 @@ static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
static int dpaa2_eth_stop(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int dpni_enabled;
+ int dpni_enabled = 0;
int retries = 10;
u32 drained;
@@ -2172,8 +2170,8 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
-int dpaa2_eth_set_dist_key(struct net_device *net_dev,
- enum dpaa2_eth_rx_dist type, u64 flags)
+static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+ enum dpaa2_eth_rx_dist type, u64 flags)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 1b49c5d3340b..c3bd2a10bc7d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -479,6 +479,7 @@ struct hnae3_knic_private_info {
const struct hnae3_dcb_ops *dcb_ops;
u16 int_rl_setting;
+ enum pkt_hash_types rss_type;
};
struct hnae3_roce_private_info {
@@ -502,6 +503,15 @@ struct hnae3_unic_private_info {
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
+#define HNAE3_USER_MPE		BIT(1)	/* multicast promisc enabled by user */
+#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
+#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
+#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
+#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */
+#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
+#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
+
struct hnae3_handle {
struct hnae3_client *client;
struct pci_dev *pdev;
@@ -520,6 +530,8 @@ struct hnae3_handle {
};
u32 numa_node_mask; /* for multi-chip support */
+
+ u8 netdev_flags;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index e9d4564b8ce1..ce93fcce2732 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -459,23 +459,81 @@ static int hns3_nic_mc_unsync(struct net_device *netdev,
return 0;
}
+static u8 hns3_get_netdev_flags(struct net_device *netdev)
+{
+ u8 flags = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
+ } else {
+ flags |= HNAE3_VLAN_FLTR;
+ if (netdev->flags & IFF_ALLMULTI)
+ flags |= HNAE3_USER_MPE;
+ }
+
+ return flags;
+}
+
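For example, IFF_PROMISC alone maps to HNAE3_USER_UPE | HNAE3_USER_MPE with HNAE3_VLAN_FLTR left clear (promiscuous mode implies vlan filtering off), while IFF_ALLMULTI without IFF_PROMISC maps to HNAE3_VLAN_FLTR | HNAE3_USER_MPE.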
static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 new_flags;
+ int ret;
- if (h->ae_algo->ops->set_promisc_mode) {
- if (netdev->flags & IFF_PROMISC)
- h->ae_algo->ops->set_promisc_mode(h, true, true);
- else if (netdev->flags & IFF_ALLMULTI)
- h->ae_algo->ops->set_promisc_mode(h, false, true);
- else
- h->ae_algo->ops->set_promisc_mode(h, false, false);
- }
- if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
+ new_flags = hns3_get_netdev_flags(netdev);
+
+ ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ if (ret) {
netdev_err(netdev, "sync uc address fail\n");
+ if (ret == -ENOSPC)
+ new_flags |= HNAE3_OVERFLOW_UPE;
+ }
+
if (netdev->flags & IFF_MULTICAST) {
- if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
+ ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
+ hns3_nic_mc_unsync);
+ if (ret) {
netdev_err(netdev, "sync mc address fail\n");
+ if (ret == -ENOSPC)
+ new_flags |= HNAE3_OVERFLOW_MPE;
+ }
+ }
+
+ hns3_update_promisc_mode(netdev, new_flags);
+	/* When the user enables promiscuous mode, vlan filtering is
+	 * disabled to let all packets in; when promiscuous mode is on
+	 * only because the MAC-VLAN table overflowed, vlan filtering
+	 * stays enabled.
+ */
+ hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
+ h->netdev_flags = new_flags;
+}
+
+void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (h->ae_algo->ops->set_promisc_mode) {
+ h->ae_algo->ops->set_promisc_mode(h,
+ promisc_flags & HNAE3_UPE,
+ promisc_flags & HNAE3_MPE);
+ }
+}
+
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ bool last_state;
+
+ if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
+ last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
+ if (enable != last_state) {
+ netdev_info(netdev,
+ "%s vlan filter\n",
+ enable ? "enable" : "disable");
+ h->ae_algo->ops->enable_vlan_filter(h, enable);
+ }
}
}
@@ -2230,6 +2288,21 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ struct hnae3_handle *handle = ring->tqp->handle;
+ enum pkt_hash_types rss_type;
+
+ if (le32_to_cpu(desc->rx.rss_hash))
+ rss_type = handle->kinfo.rss_type;
+ else
+ rss_type = PKT_HASH_TYPE_NONE;
+
+ skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
+}
+
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb, int *out_bnum)
{
@@ -2371,6 +2444,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
hns3_rx_checksum(ring, skb, desc);
+ hns3_set_rx_skb_rss_type(ring, skb);
+
return 0;
}
@@ -3590,11 +3665,15 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ bool vlan_filter_enable;
int ret;
hns3_init_mac_addr(netdev, false);
- hns3_nic_set_rx_mode(netdev);
hns3_recover_hw_addr(netdev);
+ hns3_update_promisc_mode(netdev, handle->netdev_flags);
+ vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(netdev, vlan_filter_enable);
+
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index ac881e8fc05d..f25b281ff2aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -640,6 +640,9 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
+void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 7d79a074a214..a4762c2b8ba1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -71,6 +71,7 @@ struct hns3_link_mode_mapping {
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ bool vlan_filter_enable;
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -91,7 +92,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
if (ret)
return ret;
- h->ae_algo->ops->set_promisc_mode(h, en, en);
+ if (en) {
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ } else {
+ /* recover promisc mode before loopback test */
+ hns3_update_promisc_mode(ndev, h->netdev_flags);
+ vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(ndev, vlan_filter_enable);
+ }
return ret;
}
@@ -678,12 +686,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
- /* currently we only support Toeplitz hash */
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
- netdev_err(netdev,
- "hash func not supported (only Toeplitz hash)\n");
+ if ((h->pdev->revision == 0x20 &&
+ hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ netdev_err(netdev, "hash func not supported\n");
return -EOPNOTSUPP;
}
+
if (!indir) {
netdev_err(netdev,
"set rss failed for indir is empty\n");
@@ -1077,6 +1086,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ca1a93664d0e..1bd83e8268fc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2773,6 +2773,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
return ret;
}
+static void hclge_get_rss_type(struct hclge_vport *vport)
+{
+ if (vport->rss_tuple_sets.ipv4_tcp_en ||
+ vport->rss_tuple_sets.ipv4_udp_en ||
+ vport->rss_tuple_sets.ipv4_sctp_en ||
+ vport->rss_tuple_sets.ipv6_tcp_en ||
+ vport->rss_tuple_sets.ipv6_udp_en ||
+ vport->rss_tuple_sets.ipv6_sctp_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
+ else if (vport->rss_tuple_sets.ipv4_fragment_en ||
+ vport->rss_tuple_sets.ipv6_fragment_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
+ else
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
+}
+
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
struct hclge_rss_input_tuple_cmd *req;
@@ -2792,6 +2808,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
+ hclge_get_rss_type(&hdev->vport[0]);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -2806,8 +2823,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
int i;
/* Get hash algorithm */
- if (hfunc)
- *hfunc = vport->rss_algo;
+ if (hfunc) {
+ switch (vport->rss_algo) {
+ case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
/* Get the RSS Key required by the user */
if (key)
@@ -2831,12 +2859,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
/* Set the RSS Hash Key if specififed by the user */
if (key) {
-
- if (hfunc == ETH_RSS_HASH_TOP ||
- hfunc == ETH_RSS_HASH_NO_CHANGE)
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- else
+ break;
+ case ETH_RSS_HASH_XOR:
+ hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ break;
+ case ETH_RSS_HASH_NO_CHANGE:
+ hash_algo = vport->rss_algo;
+ break;
+ default:
return -EINVAL;
+ }
+
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
@@ -2954,6 +2990,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ hclge_get_rss_type(vport);
return 0;
}
@@ -5569,6 +5606,10 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS_V1_B, enable);
}
+ if (enable)
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
+ else
+ handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
@@ -5865,7 +5906,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE 0x8100
- struct hnae3_handle *handle;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
@@ -5888,6 +5929,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
}
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
+
hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -5932,7 +5975,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
}
- handle = &hdev->vport[0].nic;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
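The rss_type computed by hclge_get_rss_type() is a standard enum
pkt_hash_types value, so the natural consumer is the rx path, which can pass
it straight to skb_set_hash(). A sketch of such a consumer (the function name
and the exact hns3 rx plumbing are assumptions; only skb_set_hash() and the
PKT_HASH_TYPE_* values are standard kernel API):

#include <linux/skbuff.h>

static void report_rx_hash(struct sk_buff *skb, u32 hw_rss_hash,
			   struct hnae3_knic_private_info *kinfo)
{
	/* PKT_HASH_TYPE_L4 tells the stack the hash covers the 4-tuple
	 * and is good enough for flow steering; L3 and NONE are weaker.
	 */
	skb_set_hash(skb, hw_rss_hash, kinfo->rss_type);
}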
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 19b32860309c..bc294b0c8b62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -89,6 +89,7 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
/* Mailbox cmd */
@@ -148,7 +149,8 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
-#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
+#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
+#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
u8 hash_config;
@@ -159,11 +161,11 @@ struct hclgevf_rss_config_cmd {
struct hclgevf_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
- u8 ipv4_stcp_en;
+ u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
- u8 ipv6_stcp_en;
+ u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
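As a reading aid for the renamed HCLGEVF_RSS_HASH_KEY_OFFSET_B define: the
hash_config byte of hclgevf_rss_config_cmd packs the algorithm into its low
nibble and the key chunk index from bit 4 upward, roughly as below (the
helper name is made up; the real packing is done inline in
hclgevf_set_rss_algo_key() later in this series):

static u8 my_pack_hash_config(u8 hfunc, int key_offset)
{
	u8 hash_config = 0;

	hash_config |= hfunc & HCLGEVF_RSS_HASH_ALGO_MASK;           /* bits 3:0 */
	hash_config |= key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B;  /* chunk 0..2 */
	return hash_config;
}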
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index ca4a9f790917..ac67fecb9408 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -386,6 +386,47 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
return -EINVAL;
}
+static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
+ const u8 hfunc, const u8 *key)
+{
+ struct hclgevf_rss_config_cmd *req;
+ struct hclgevf_desc desc;
+ int key_offset;
+ int key_size;
+ int ret;
+
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
+
+ for (key_offset = 0; key_offset < 3; key_offset++) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
+
+ if (key_offset == 2)
+ key_size =
+ HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+ else
+ key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+ memcpy(req->hash_key,
+ key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Configure RSS config fail, status = %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGEVF_RSS_KEY_SIZE;
@@ -466,68 +507,40 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
return status;
}
-static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
- u8 *key)
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_config_cmd *req;
- int lkup_times = key ? 3 : 1;
- struct hclgevf_desc desc;
- int key_offset;
- int key_size;
- int status;
-
- req = (struct hclgevf_rss_config_cmd *)desc.data;
- lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
-
- for (key_offset = 0; key_offset < lkup_times; key_offset++) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_RSS_GENERIC_CONFIG,
- true);
- req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "failed to get hardware RSS cfg, status = %d\n",
- status);
- return status;
+ if (handle->pdev->revision >= 0x21) {
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->hash_algo) {
+ case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
}
- if (key_offset == 2)
- key_size =
- HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
- else
- key_size = HCLGEVF_RSS_HASH_KEY_NUM;
-
+ /* Get the RSS Key required by the user */
if (key)
- memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
- req->hash_key,
- key_size);
+ memcpy(key, rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
}
- if (hash) {
- if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
- *hash = ETH_RSS_HASH_TOP;
- else
- *hash = ETH_RSS_HASH_UNKNOWN;
- }
-
- return 0;
-}
-
-static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i;
-
if (indir)
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
- return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+ return 0;
}
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
@@ -535,7 +548,36 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i;
+ int ret, i;
+
+ if (handle->pdev->revision >= 0x21) {
+ /* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ rss_cfg->hash_algo =
+ HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ break;
+ case ETH_RSS_HASH_XOR:
+ rss_cfg->hash_algo =
+ HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ break;
+ case ETH_RSS_HASH_NO_CHANGE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+ key);
+ if (ret)
+ return ret;
+
+ /* Update the shadow RSS key with the user specified hash key */
+ memcpy(rss_cfg->rss_hash_key, key,
+ HCLGEVF_RSS_KEY_SIZE);
+ }
+ }
/* update the shadow RSS table with user specified qids */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
@@ -545,6 +587,193 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
return hclgevf_set_rss_indir_table(hdev);
}
+static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGEVF_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGEVF_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGEVF_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGEVF_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclgevf_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 tuple_sets;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!tuple_sets)
+ return 0;
+
+ if (tuple_sets & HCLGEVF_D_PORT_BIT)
+ nfc->data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGEVF_S_PORT_BIT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGEVF_D_IP_BIT)
+ nfc->data |= RXH_IP_DST;
+ if (tuple_sets & HCLGEVF_S_IP_BIT)
+ nfc->data |= RXH_IP_SRC;
+
+ return 0;
+}
+
+static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
+ struct hclgevf_rss_cfg *rss_cfg)
+{
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Configure rss input fail, status = %d\n", ret);
+ return ret;
+}
+
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1276,6 +1505,39 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->rss_size_max;
+ if (hdev->pdev->revision >= 0x21) {
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ netdev_rss_key_fill(rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
+
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+
+ ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+ if (ret)
+ return ret;
+ }
+
/* Initialize RSS indirect table for each vport */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
@@ -1918,6 +2180,8 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_rss_indir_size = hclgevf_get_rss_indir_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
+ .get_rss_tuple = hclgevf_get_rss_tuple,
+ .set_rss_tuple = hclgevf_set_rss_tuple,
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
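A quick worked check of the key chunking in hclgevf_set_rss_algo_key() above,
assuming HCLGEVF_RSS_KEY_SIZE is 40 and HCLGEVF_RSS_HASH_KEY_NUM is 16 (the
values the matching PF driver uses): the three descriptors carry the key as
16 + 16 + 8 bytes.

/* key_offset 0: bytes  0..15, key_size = 16
 * key_offset 1: bytes 16..31, key_size = 16
 * key_offset 2: bytes 32..39, key_size = 40 - 16 * 2 = 8
 */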
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cf5fbf793c5e..aed241e8ffab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -46,6 +46,13 @@
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGEVF_D_PORT_BIT BIT(0)
+#define HCLGEVF_S_PORT_BIT BIT(1)
+#define HCLGEVF_D_IP_BIT BIT(2)
+#define HCLGEVF_S_IP_BIT BIT(3)
+#define HCLGEVF_V_TAG_BIT BIT(4)
/* states of hclgevf device & tasks */
enum hclgevf_states {
@@ -106,12 +113,24 @@ struct hclgevf_cfg {
u32 numa_node_map;
};
+struct hclgevf_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
struct hclgevf_rss_cfg {
u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+ struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
struct hclgevf_misc_vector {
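Expanding the new tuple masks against the bit defines above makes the
defaults explicit: HCLGEVF_RSS_INPUT_TUPLE_OTHER (GENMASK(3, 0)) hashes on
src/dst IP plus src/dst port, and HCLGEVF_RSS_INPUT_TUPLE_SCTP (GENMASK(4, 0))
additionally folds in the SCTP verification tag. Illustrative composite names
(not defined by the driver):

#define MY_TUPLE_OTHER (HCLGEVF_D_PORT_BIT | HCLGEVF_S_PORT_BIT | \
			HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT)	/* == GENMASK(3, 0) */
#define MY_TUPLE_SCTP  (MY_TUPLE_OTHER | HCLGEVF_V_TAG_BIT)	/* == GENMASK(4, 0) */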
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f33fd22b351c..3238aa7f5dac 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -167,4 +167,7 @@ config SKY2_DEBUG
If unsure, say N.
+
+source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 55d4d10aa7d3..89dea7284d5b 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeontx2/
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
new file mode 100644
index 000000000000..35827bdf1878
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -0,0 +1,17 @@
+#
+# Marvell OcteonTX2 drivers configuration
+#
+
+config OCTEONTX2_MBOX
+ tristate
+
+config OCTEONTX2_AF
+ tristate "Marvell OcteonTX2 RVU Admin Function driver"
+ select OCTEONTX2_MBOX
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ help
+ This driver supports Marvell's OcteonTX2 Resource Virtualization
+ Unit's admin function manager, which manages all RVU HW resources
+ and provides a medium for other PFs/VFs to configure the HW. It
+ should be enabled for other RVU device drivers to work.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..e579dcd54c97
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell OcteonTX2 device drivers.
+#
+
+obj-$(CONFIG_OCTEONTX2_AF) += af/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
new file mode 100644
index 000000000000..eaac26424486
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
+#
+
+obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+
+octeontx2_mbox-y := mbox.o
+octeontx2_af-y := cgx.o rvu.o rvu_cgx.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
new file mode 100644
index 000000000000..5328ecc8cea2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "cgx.h"
+
+#define DRV_NAME "octeontx2-cgx"
+#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
+
+/**
+ * struct lmac
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @event_cb: callback for linkchange events
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @cgx: parent cgx port
+ * @lmac_id: lmac port id
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_event_cb event_cb;
+ bool cmd_pend;
+ struct cgx *cgx;
+ u8 lmac_id;
+ char *name;
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct list_head cgx_list;
+};
+
+static LIST_HEAD(cgx_list);
+
+/* Supported devices */
+static const struct pci_device_id cgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, cgx_id_table);
+
+static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+ writeq(val, cgx->reg_base + (lmac << 18) + offset);
+}
+
+static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << 18) + offset);
+}
+
+static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+{
+ if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+ return NULL;
+
+ return cgx->lmac_idmap[lmac_id];
+}
+
+int cgx_get_cgx_cnt(void)
+{
+ struct cgx *cgx_dev;
+ int count = 0;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL(cgx_get_cgx_cnt);
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx->lmac_count;
+}
+EXPORT_SYMBOL(cgx_get_lmac_cnt);
+
+void *cgx_get_pdata(int cgx_id)
+{
+ struct cgx *cgx_dev;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+ if (cgx_dev->cgx_id == cgx_id)
+ return cgx_dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cgx_get_pdata);
+
+/* CGX Firmware interface low level support */
+static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+{
+ struct cgx *cgx = lmac->cgx;
+ struct device *dev;
+ int err = 0;
+ u64 cmd;
+
+ /* Ensure no other command is in progress */
+ err = mutex_lock_interruptible(&lmac->cmd_lock);
+ if (err)
+ return err;
+
+ /* Ensure command register is free */
+ cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
+ if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /* Update ownership in command request */
+ req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+
+ /* Mark this lmac as pending, before we start */
+ lmac->cmd_pend = true;
+
+ /* Start command in hardware */
+ cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
+
+ /* Ensure command is completed without errors */
+ if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
+ msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
+ dev = &cgx->pdev->dev;
+ dev_err(dev, "cgx port %d:%d cmd timeout\n",
+ cgx->cgx_id, lmac->lmac_id);
+ err = -EIO;
+ goto unlock;
+ }
+
+ /* we have a valid command response */
+ smp_rmb(); /* Ensure the latest updates are visible */
+ *resp = lmac->resp;
+
+unlock:
+ mutex_unlock(&lmac->cmd_lock);
+
+ return err;
+}
+
+static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
+ struct cgx *cgx, int lmac_id)
+{
+ struct lmac *lmac;
+ int err;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ err = cgx_fwi_cmd_send(req, resp, lmac);
+
+ /* Check for valid response */
+ if (!err) {
+ if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return err;
+}
+
+/* Hardware event handlers */
+static inline void cgx_link_change_handler(u64 lstat,
+ struct lmac *lmac)
+{
+ struct cgx *cgx = lmac->cgx;
+ struct cgx_link_event event;
+ struct device *dev;
+
+ dev = &cgx->pdev->dev;
+
+ event.lstat.link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ event.lstat.full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+ event.lstat.speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);
+ event.lstat.err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+
+ event.cgx_id = cgx->cgx_id;
+ event.lmac_id = lmac->lmac_id;
+
+ if (!lmac->event_cb.notify_link_chg) {
+ dev_dbg(dev, "cgx port %d:%d Link change handler null\n",
+ cgx->cgx_id, lmac->lmac_id);
+ if (event.lstat.err_type != CGX_ERR_NONE) {
+ dev_err(dev, "cgx port %d:%d Link error %d\n",
+ cgx->cgx_id, lmac->lmac_id,
+ event.lstat.err_type);
+ }
+ dev_info(dev, "cgx port %d:%d Link status %s, speed %x\n",
+ cgx->cgx_id, lmac->lmac_id,
+ event.lstat.link_up ? "UP" : "DOWN",
+ event.lstat.speed);
+ return;
+ }
+
+ if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
+ dev_err(dev, "event notification failure\n");
+}
+
+static inline bool cgx_cmdresp_is_linkevent(u64 event)
+{
+ u8 id;
+
+ id = FIELD_GET(EVTREG_ID, event);
+ if (id == CGX_CMD_LINK_BRING_UP ||
+ id == CGX_CMD_LINK_BRING_DOWN)
+ return true;
+ else
+ return false;
+}
+
+static inline bool cgx_event_is_linkevent(u64 event)
+{
+ if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
+ return true;
+ else
+ return false;
+}
+
+static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+{
+ struct lmac *lmac = data;
+ struct cgx *cgx;
+ u64 event;
+
+ cgx = lmac->cgx;
+
+ event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
+
+ if (!FIELD_GET(EVTREG_ACK, event))
+ return IRQ_NONE;
+
+ switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
+ case CGX_EVT_CMD_RESP:
+ /* Copy the response. Since only one command is active at a
+ * time, there is no way a response can get overwritten
+ */
+ lmac->resp = event;
+ /* Ensure response is updated before thread context starts */
+ smp_wmb();
+
+ /* There won't be separate events for link change initiated from
+ * software; Hence report the command responses as events
+ */
+ if (cgx_cmdresp_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+
+ /* Release thread waiting for completion */
+ lmac->cmd_pend = false;
+ wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ break;
+ case CGX_EVT_ASYNC:
+ if (cgx_event_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+ break;
+ }
+
+ /* Any new event or command response will be posted by firmware
+ * only after the current status is acked.
+ * Ack the interrupt register as well.
+ */
+ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
+ cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+
+ return IRQ_HANDLED;
+}
+
+/* APIs for PHY management using CGX firmware interface */
+
+/* callback registration for hardware events like link change */
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ lmac->event_cb = *cb;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_register);
+
+static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+{
+ u64 req = 0;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+}
+
+static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+{
+ struct device *dev = &cgx->pdev->dev;
+ int major_ver, minor_ver;
+ u64 resp;
+ int err;
+
+ if (!cgx->lmac_count)
+ return 0;
+
+ err = cgx_fwi_read_version(&resp, cgx);
+ if (err)
+ return err;
+
+ major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+ dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+ major_ver, minor_ver);
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
+ minor_ver != CGX_FIRMWARE_MINOR_VER)
+ return -EIO;
+ else
+ return 0;
+}
+
+static int cgx_lmac_init(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i, err;
+
+ cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ if (cgx->lmac_count > MAX_LMAC_PER_CGX)
+ cgx->lmac_count = MAX_LMAC_PER_CGX;
+
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ if (!lmac)
+ return -ENOMEM;
+ lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+ if (!lmac->name)
+ return -ENOMEM;
+ sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ lmac->lmac_id = i;
+ lmac->cgx = cgx;
+ init_waitqueue_head(&lmac->wq_cmd_cmplt);
+ mutex_init(&lmac->cmd_lock);
+ err = request_irq(pci_irq_vector(cgx->pdev,
+ CGX_LMAC_FWI + i * 9),
+ cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
+ FW_CGX_INT);
+
+ /* Add reference */
+ cgx->lmac_idmap[i] = lmac;
+ }
+
+ return cgx_lmac_verify_fwi_version(cgx);
+}
+
+static int cgx_lmac_exit(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i;
+
+ /* Free all lmac related resources */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = cgx->lmac_idmap[i];
+ if (!lmac)
+ continue;
+ free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ kfree(lmac->name);
+ kfree(lmac);
+ }
+
+ return 0;
+}
+
+static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cgx *cgx;
+ int err, nvec;
+
+ cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
+ if (!cgx)
+ return -ENOMEM;
+ cgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, cgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!cgx->reg_base) {
+ dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ nvec = CGX_NVEC;
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0 || err != nvec) {
+ dev_err(dev, "Request for %d msix vectors failed, err %d\n",
+ nvec, err);
+ goto err_release_regions;
+ }
+
+ list_add(&cgx->cgx_list, &cgx_list);
+ cgx->cgx_id = cgx_get_cgx_cnt() - 1;
+
+ err = cgx_lmac_init(cgx);
+ if (err)
+ goto err_release_lmac;
+
+ return 0;
+
+err_release_lmac:
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void cgx_remove(struct pci_dev *pdev)
+{
+ struct cgx *cgx = pci_get_drvdata(pdev);
+
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver cgx_driver = {
+ .name = DRV_NAME,
+ .id_table = cgx_id_table,
+ .probe = cgx_probe,
+ .remove = cgx_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
new file mode 100644
index 000000000000..a2a7a6d72d32
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CGX_H
+#define CGX_H
+
+#include "cgx_fw_if.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_CGX 0xA059
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+
+#define MAX_CGX 3
+#define MAX_LMAC_PER_CGX 4
+#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
+
+/* Registers */
+#define CGXX_CMRX_INT 0x040
+#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMRX_INT_ENA_W1S 0x058
+#define CGXX_CMRX_RX_ID_MAP 0x060
+#define CGXX_CMRX_RX_LMACS 0x128
+#define CGXX_SCRATCH0_REG 0x1050
+#define CGXX_SCRATCH1_REG 0x1058
+#define CGX_CONST 0x2000
+
+#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
+#define CGX_EVENT_REG CGXX_SCRATCH0_REG
+#define CGX_CMD_TIMEOUT 2200 /* msecs */
+
+#define CGX_NVEC 37
+#define CGX_LMAC_FWI 0
+
+struct cgx_link_event {
+ struct cgx_lnk_sts lstat;
+ u8 cgx_id;
+ u8 lmac_id;
+};
+
+/**
+ * struct cgx_event_cb
+ * @notify_link_chg: callback for link change notification
+ * @data: data passed to callback function
+ */
+struct cgx_event_cb {
+ int (*notify_link_chg)(struct cgx_link_event *event, void *data);
+ void *data;
+};
+
+extern struct pci_driver cgx_driver;
+
+int cgx_get_cgx_cnt(void);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+#endif /* CGX_H */
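A minimal consumer of this header, registering for link-change notifications
on every LMAC of the first CGX instance, might look as follows (a sketch; the
intended consumer is the AF driver, and my_link_notify() is a made-up
callback):

static int my_link_notify(struct cgx_link_event *event, void *data)
{
	pr_info("cgx%d lmac%d link %s, speed %d\n",
		event->cgx_id, event->lmac_id,
		event->lstat.link_up ? "up" : "down",
		(int)event->lstat.speed);
	return 0;
}

static int my_register_link_events(void)
{
	struct cgx_event_cb cb = { .notify_link_chg = my_link_notify };
	void *cgxd = cgx_get_pdata(0);
	int lmac, err;

	if (!cgxd)
		return -ENODEV;

	/* cgx_lmac_evh_register() copies the callback per LMAC */
	for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
		err = cgx_lmac_evh_register(&cb, cgxd, lmac);
		if (err)
			return err;
	}
	return 0;
}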
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
new file mode 100644
index 000000000000..fa17af3f4ba7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CGX_FW_INTF_H__
+#define __CGX_FW_INTF_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define CGX_FIRMWARE_MAJOR_VER 1
+#define CGX_FIRMWARE_MINOR_VER 0
+
+#define CGX_EVENT_ACK 1UL
+
+/* CGX error types. Set for cmd response status as CGX_STAT_FAIL */
+enum cgx_error_type {
+ CGX_ERR_NONE,
+ CGX_ERR_LMAC_NOT_ENABLED,
+ CGX_ERR_LMAC_MODE_INVALID,
+ CGX_ERR_REQUEST_ID_INVALID,
+ CGX_ERR_PREV_ACK_NOT_CLEAR,
+ CGX_ERR_PHY_LINK_DOWN,
+ CGX_ERR_PCS_RESET_FAIL,
+ CGX_ERR_AN_CPT_FAIL,
+ CGX_ERR_TX_NOT_IDLE,
+ CGX_ERR_RX_NOT_IDLE,
+ CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+ CGX_ERR_SPUX_RX_ALIGN_FAIL,
+ CGX_ERR_SPUX_TX_FAULT,
+ CGX_ERR_SPUX_RX_FAULT,
+ CGX_ERR_SPUX_RESET_FAIL,
+ CGX_ERR_SPUX_AN_RESET_FAIL,
+ CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+ CGX_ERR_SMUX_RX_LINK_NOT_OK,
+ CGX_ERR_PCS_RECV_LINK_FAIL,
+ CGX_ERR_TRAINING_FAIL,
+ CGX_ERR_RX_EQU_FAIL,
+ CGX_ERR_SPUX_BER_FAIL,
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+ CGX_LINK_NONE,
+ CGX_LINK_10M,
+ CGX_LINK_100M,
+ CGX_LINK_1G,
+ CGX_LINK_2HG,
+ CGX_LINK_5G,
+ CGX_LINK_10G,
+ CGX_LINK_20G,
+ CGX_LINK_25G,
+ CGX_LINK_40G,
+ CGX_LINK_50G,
+ CGX_LINK_100G,
+ CGX_LINK_SPEED_MAX,
+};
+
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+ CGX_CMD_NONE,
+ CGX_CMD_GET_FW_VER,
+ CGX_CMD_GET_MAC_ADDR,
+ CGX_CMD_SET_MTU,
+ CGX_CMD_GET_LINK_STS, /* optional to user */
+ CGX_CMD_LINK_BRING_UP,
+ CGX_CMD_LINK_BRING_DOWN,
+ CGX_CMD_INTERNAL_LBK,
+ CGX_CMD_EXTERNAL_LBK,
+ CGX_CMD_HIGIG,
+ CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_MODE_CHANGE, /* hot plug support */
+ CGX_CMD_INTF_SHUTDOWN,
+ CGX_CMD_IRQ_ENABLE,
+ CGX_CMD_IRQ_DISABLE,
+};
+
+/* async event ids */
+enum cgx_evt_id {
+ CGX_EVT_NONE,
+ CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+ CGX_EVT_ASYNC,
+ CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+ CGX_STAT_SUCCESS,
+ CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+ CGX_CMD_OWN_NS,
+ CGX_CMD_OWN_FIRMWARE,
+};
+
+/* m - bit mask
+ * y - value to be written in the bitrange
+ * x - input value whose bitrange to be modified
+ */
+#define FIELD_SET(m, y, x) \
+ (((x) & ~(m)) | \
+ FIELD_PREP((m), (y)))
+
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * This acts as the status register
+ * Provides details on command ack/status, command response, error details
+ */
+#define EVTREG_ACK BIT_ULL(0)
+#define EVTREG_EVT_TYPE BIT_ULL(1)
+#define EVTREG_STAT BIT_ULL(2)
+#define EVTREG_ID GENMASK_ULL(8, 3)
+
+/* Response to command IDs with command status as CGX_STAT_FAIL
+ *
+ * Not applicable for commands :
+ * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ */
+#define EVTREG_ERRTYPE GENMASK_ULL(18, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAJOR_VER GENMASK_ULL(12, 9)
+#define RESP_MINOR_VER GENMASK_ULL(16, 13)
+
+/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+
+/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ *
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed
+ * when processing link up/down/change command.
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+ uint64_t reserved1:9;
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t speed:4; /* cgx_link_speed */
+ uint64_t err_type:10;
+ uint64_t reserved2:39;
+};
+
+#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN BIT_ULL(0)
+#define CMDREG_ID GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument need
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+#define CMDLINKCHANGE_LINKUP BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+
+#endif /* __CGX_FW_INTF_H__ */
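As a usage illustration of this register layout, composing a hypothetical
CGX_CMD_SET_MTU request combines the ID and argument fields with FIELD_SET
(so far the in-tree code only issues CGX_CMD_GET_FW_VER; the helper below is
not part of the driver):

static u64 my_build_set_mtu_req(u16 mtu)
{
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_MTU, req);	/* id, bits 7:2 */
	req = FIELD_SET(CMDMTU_SIZE, mtu, req);			/* arg, bits 23:8 */
	/* cgx_fwi_cmd_send() sets CMDREG_OWN to CGX_CMD_OWN_FIRMWARE
	 * before writing req to CGX_COMMAND_REG.
	 */
	return req;
}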
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 000000000000..85ba24a05774
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "rvu_reg.h"
+#include "mbox.h"
+
+static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ tx_hdr->num_msgs = 0;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+}
+EXPORT_SYMBOL(otx2_mbox_reset);
+
+void otx2_mbox_destroy(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = NULL;
+ mbox->hwbase = NULL;
+
+ kfree(mbox->dev);
+ mbox->dev = NULL;
+}
+EXPORT_SYMBOL(otx2_mbox_destroy);
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_AF_AFPF_MBOX0;
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_PF_PFAF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+ mbox->tr_shift = 12;
+ break;
+ case MBOX_DIR_VFPF:
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_VF_VFPF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ mbox->reg_base = reg_base;
+ mbox->hwbase = hwbase;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+
+ mbox->ndevs = ndevs;
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_init);
+
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int timeout = 0, sleep = 1;
+
+ while (mdev->num_msgs != mdev->msgs_acked) {
+ msleep(sleep);
+ timeout += sleep;
+ if (timeout >= MBOX_RSP_TIMEOUT)
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
+
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ unsigned long timeout = jiffies + 1 * HZ;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ cpu_relax();
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ /* Reset header for next messages */
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ /* num_msgs != 0 signals to the peer that the buffer has a number of
+ * messages. So this should be written after writing all the messages
+ * to the shared memory.
+ */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ writeq(1, (void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send);
+
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ spin_lock(&mdev->mbox_lock);
+ size = ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
+ goto exit;
+
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
+
+ /* Clear the whole msg region */
+ memset(msghdr, 0, sizeof(*msghdr) + size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
+
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg)
+{
+ unsigned long imsg = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ u16 msgs;
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ return ERR_PTR(-ENODEV);
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *pmsg = mdev->mbase + imsg;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (msg == pmsg) {
+ if (pmsg->id != prsp->id)
+ return ERR_PTR(-ENODEV);
+ return prsp;
+ }
+
+ imsg = pmsg->next_msgoff;
+ irsp = prsp->next_msgoff;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(otx2_mbox_get_rsp);
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_reply_invalid_msg);
+
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ spin_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(otx2_mbox_nonempty);
+
+const char *otx2_mbox_id2name(u16 id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2) case _id: return # _name;
+ MBOX_MESSAGES
+#undef M
+ default:
+ return "INVALID ID";
+ }
+}
+EXPORT_SYMBOL(otx2_mbox_id2name);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
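Taken together, a requester-side round trip over this mailbox is expected to
follow the pattern below (a sketch assuming otx2_mbox_init() was already run
for the MBOX_DIR_PFAF direction; my_send_ready() is a made-up wrapper):

static int my_send_ready(struct otx2_mbox *mbox, int devid)
{
	struct msg_req *req;
	struct msg_rsp *rsp;

	req = (struct msg_req *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*req));
	if (!req)
		return -ENOMEM;
	req->hdr.id = MBOX_MSG_READY;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;

	otx2_mbox_msg_send(mbox, devid);	/* publish num_msgs, ring doorbell */
	if (otx2_mbox_wait_for_rsp(mbox, devid))	/* peer bumps msgs_acked */
		return -EIO;

	rsp = (struct msg_rsp *)otx2_mbox_get_rsp(mbox, devid, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);
	return rsp->hdr.rc;
}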
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
new file mode 100644
index 000000000000..bedf0ee62450
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MBOX_H
+#define MBOX_H
+
+#include <linux/etherdevice.h>
+#include <linux/sizes.h>
+
+#include "rvu_struct.h"
+
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF: PF initiated, PF/VF VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF: AF initiated, PF/VF PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "incorrect mailbox area sizes"
+#endif
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
+
+#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+struct otx2_mbox_dev {
+ void *mbase; /* This dev's mbox region */
+ spinlock_t mbox_lock;
+ u16 msg_size; /* Total msg size to be sent */
+ u16 rsp_size; /* Total size of expected responses */
+ u16 num_msgs; /* No of msgs sent or waiting for response */
+ u16 msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+ struct pci_dev *pdev;
+ void *hwbase; /* Mbox region advertised by HW */
+ void *reg_base;/* CSR base for this dev */
+ u64 trigger; /* Trigger mbox notification */
+ u16 tr_shift; /* Mbox trigger shift */
+ u64 rx_start; /* Offset of Rx region in mbox memory */
+ u64 tx_start; /* Offset of Tx region in mbox memory */
+ u16 rx_size; /* Size of Rx region */
+ u16 tx_size; /* Size of Tx region */
+ u16 ndevs; /* The number of peers */
+ struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+ u16 num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+ u16 pcifunc; /* Who's sending this msg */
+ u16 id; /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+ u16 sig; /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x0001)
+ u16 ver; /* Version of msg's structure for this ID */
+ u16 next_msgoff; /* Offset of next msg within mailbox region */
+ int rc; /* Msg processed response code */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+ u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ int devid, int size)
+{
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+
+enum {
+#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+#undef M
+};
+
+/* Mailbox message formats */
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+struct ready_msg_rsp {
+ struct mbox_msghdr hdr;
+ u16 sclk_feq; /* SCLK frequency */
+};
+
+/* Structure for requesting resource provisioning.
+ * 'modify' flag to be used either when requesting more resources
+ * or when detaching part of a certain resource type.
+ * Rest of the fields specify how many of what type to
+ * be attached.
+ */
+struct rsrc_attach {
+ struct mbox_msghdr hdr;
+ u8 modify:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+};
+
+/* Structure for relinquishing resources.
+ * 'partial' flag to be used when relinquishing all resources
+ * but only of a certain type. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+ struct mbox_msghdr hdr;
+ u8 partial:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u8 sso:1;
+ u8 ssow:1;
+ u8 timlfs:1;
+ u8 cptlfs:1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ u16 npa_msixoff;
+ u16 nix_msixoff;
+ u8 sso;
+ u8 ssow;
+ u8 timlfs;
+ u8 cptlfs;
+ u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+#endif /* MBOX_H */
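To make the X-macro concrete: under the enum's M() definition above,
MBOX_MESSAGES currently expands to the IDs below, and otx2_mbox_id2name()
walks the same table with a string-returning M() to map an ID back to its
name:

enum {
	MBOX_MSG_READY            = 0x001,
	MBOX_MSG_ATTACH_RESOURCES = 0x002,
	MBOX_MSG_DETACH_RESOURCES = 0x003,
	MBOX_MSG_MSIX_OFFSET      = 0x004,
};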
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 000000000000..2033f42c3226
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,1637 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "rvu_reg.h"
+
+#define DRV_NAME "octeontx2-af"
+#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+#define DRV_VERSION "1.0"
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+
+/* Supported devices */
+static const struct pci_device_id rvu_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_id_table);
+
+/* Poll an RVU block's register 'offset' for a 'zero'
+ * or 'nonzero' at bits specified by 'mask'
+ */
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+{
+ void __iomem *reg;
+ int timeout = 100;
+ u64 reg_val;
+
+ reg = rvu->afreg_base + ((block << 28) | offset);
+ while (timeout) {
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1, 2);
+ timeout--;
+ }
+ return -EBUSY;
+}
+
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
+{
+ int id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ id = find_first_zero_bit(rsrc->bmap, rsrc->max);
+ if (id >= rsrc->max)
+ return -ENOSPC;
+
+ __set_bit(id, rsrc->bmap);
+
+ return id;
+}
+
+static int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, start, nrsrc);
+ return start;
+}
+
+static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+{
+ if (!rsrc->bmap)
+ return;
+ if (start >= rsrc->max)
+ return;
+
+ bitmap_clear(rsrc->bmap, start, nrsrc);
+}
+
+static bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return false;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return false;
+
+ return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return;
+
+ __clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+ int used;
+
+ if (!rsrc->bmap)
+ return 0;
+
+ used = bitmap_weight(rsrc->bmap, rsrc->max);
+ return (rsrc->max - used);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+ rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+ sizeof(long), GFP_KERNEL);
+ if (!rsrc->bmap)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+ u16 match = 0;
+ int lf;
+
+ spin_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (block->fn_map[lf] == pcifunc) {
+ if (slot == match) {
+ spin_unlock(&rvu->rsrc_lock);
+ return lf;
+ }
+ match++;
+ }
+ }
+ spin_unlock(&rvu->rsrc_lock);
+ return -ENODEV;
+}
+
+/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+ int devnum, blkaddr = -ENODEV;
+ u64 cfg, reg;
+ bool is_pf;
+
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ goto exit;
+ case BLKTYPE_NIX:
+ /* For now assume NIX0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+ break;
+ case BLKTYPE_SSO:
+ blkaddr = BLKADDR_SSO;
+ goto exit;
+ case BLKTYPE_SSOW:
+ blkaddr = BLKADDR_SSOW;
+ goto exit;
+ case BLKTYPE_TIM:
+ blkaddr = BLKADDR_TIM;
+ goto exit;
+ case BLKTYPE_CPT:
+ /* For now assume CPT0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+ break;
+ }
+
+ /* Check if this is a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ if (blktype == BLKTYPE_NIX) {
+ reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
+ if (blktype == BLKTYPE_CPT) {
+ reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT0;
+ }
+
+exit:
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+ return -ENODEV;
+}
+
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, u16 pcifunc,
+ u16 lf, bool attach)
+{
+ int devnum, num_lfs = 0;
+ bool is_pf;
+ u64 reg;
+
+ if (lf >= block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
+ __func__, lf, block->name, block->lf.max);
+ return;
+ }
+
+ /* Check if this is for a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ block->fn_map[lf] = attach ? pcifunc : 0;
+
+ switch (block->type) {
+ case BLKTYPE_NPA:
+ pfvf->npalf = attach;
+ num_lfs = pfvf->npalf;
+ break;
+ case BLKTYPE_NIX:
+ pfvf->nixlf = attach;
+ num_lfs = pfvf->nixlf;
+ break;
+ case BLKTYPE_SSO:
+ attach ? pfvf->sso++ : pfvf->sso--;
+ num_lfs = pfvf->sso;
+ break;
+ case BLKTYPE_SSOW:
+ attach ? pfvf->ssow++ : pfvf->ssow--;
+ num_lfs = pfvf->ssow;
+ break;
+ case BLKTYPE_TIM:
+ attach ? pfvf->timlfs++ : pfvf->timlfs--;
+ num_lfs = pfvf->timlfs;
+ break;
+ case BLKTYPE_CPT:
+ attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+ num_lfs = pfvf->cptlfs;
+ break;
+ }
+
+ reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+ rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
+}
+
+inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
+{
+ u64 cfg;
+
+ /* Get numVFs attached to this PF and first HWVF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ *numvfs = (cfg >> 12) & 0xFF;
+ *hwvf = cfg & 0xFFF;
+}
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+{
+ int pf, func;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ func = pcifunc & RVU_PFVF_FUNC_MASK;
+
+ /* Get first HWVF attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+
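+ /* FUNC field in pcifunc is 1-based for VFs (0 means the PF itself) */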
+ return ((cfg & 0xFFF) + func - 1);
+}
+
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
+{
+ /* Check if it is a PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ return &rvu->pf[rvu_get_pf(pcifunc)];
+}
+
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
+{
+ struct rvu_block *block;
+
+ if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
+ return false;
+
+ block = &hw->block[blkaddr];
+ return block->implemented;
+}
+
+static void rvu_check_block_implemented(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* For each block check if 'implemented' bit is set */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
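+ /* Bit 11 of BLOCK_ADDRX_DISC is the 'implemented' flag */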
+ if (cfg & BIT_ULL(11))
+ block->implemented = true;
+ }
+}
+
+static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+
+ if (!block->implemented)
+ return;
+
+ rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
+ rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+}
+
+static void rvu_reset_all_blocks(struct rvu *rvu)
+{
+ /* Do a HW reset of all RVU blocks */
+ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+}
+
+static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
+{
+ struct rvu_pfvf *pfvf;
+ u64 cfg;
+ int lf;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ cfg = rvu_read64(rvu, block->addr,
+ block->lfcfg_reg | (lf << block->lfshift));
+ if (!(cfg & BIT_ULL(63)))
+ continue;
+
+ /* Set this resource as being used */
+ __set_bit(lf, block->lf.bmap);
+
+ /* Get the PF/VF to which this LF is attached */
+ pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ (cfg >> 8) & 0xFFFF, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+ int min_vecs;
+
+ if (!vf)
+ goto check_pf;
+
+ if (!nvecs) {
+ dev_warn(rvu->dev,
+ "PF%d:VF%d is configured with zero msix vectors\n",
+ pf, vf - 1);
+ }
+ return;
+
+check_pf:
+ if (pf == 0)
+ min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+ else
+ min_vecs = RVU_PF_INT_VEC_CNT;
+
+ if (nvecs >= min_vecs)
+ return;
+ dev_warn(rvu->dev,
+ "PF%d is configured with too few vectors, %d, min is %d\n",
+ pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf, err;
+ int nvecs, offset, max_msix;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, phy_addr;
+ dma_addr_t iova;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ pfvf = &rvu->pf[pf];
+ /* Get num of MSIX vectors attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+ /* Alloc msix bitmap for this PF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ /* Allocate memory for MSIX vector to RVU block LF mapping */
+ pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* For PF0 (AF), firmware sets the MSIX vector offsets for
+ * the AF, block AF and PF0_INT vectors, so skip to the VFs.
+ */
+ if (!pf)
+ goto setup_vfmsix;
+
+ /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+ nvecs = (cfg >> 12) & 0xFF;
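+ /* The low 11 bits of INT_CFG hold the vector offset; clear them first */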
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ if (offset < 0)
+ return offset;
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+ /* Alloc msix bitmap for VFs */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ /* Get num of MSIX vectors attached to this VF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = (cfg & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+ /* Alloc msix bitmap for this VF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ pfvf->msix_lfmap =
+ devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ if (offset < 0)
+ return offset;
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+ cfg | offset);
+ }
+ }
+
+ /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
+ * create an IOMMU mapping for the physical address configured by
+ * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ iova = dma_map_resource(rvu->dev, phy_addr,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ if (dma_mapping_error(rvu->dev, iova))
+ return -ENOMEM;
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
+ rvu->msix_base_iova = iova;
+
+ return 0;
+}
+
+static void rvu_free_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int id, max_msix;
+ u64 cfg;
+
+ /* Free block LF bitmaps */
+ for (id = 0; id < BLK_COUNT; id++) {
+ block = &hw->block[id];
+ kfree(block->lf.bmap);
+ }
+
+ /* Free MSIX bitmaps */
+ for (id = 0; id < hw->total_pfs; id++) {
+ pfvf = &rvu->pf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ for (id = 0; id < hw->total_vfs; id++) {
+ pfvf = &rvu->hwvf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ /* Unmap MSIX vector base IOVA mapping */
+ if (!rvu->msix_base_iova)
+ return;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static int rvu_setup_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid, err;
+ u64 cfg;
+
+ /* Get HW supported max RVU PF & VF count */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ hw->total_pfs = (cfg >> 32) & 0xFF;
+ hw->total_vfs = (cfg >> 20) & 0xFFF;
+ hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+
+ /* Init NPA LF's bitmap */
+ block = &hw->block[BLKADDR_NPA];
+ if (!block->implemented)
+ goto nix;
+ cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
+ block->lf.max = (cfg >> 16) & 0xFFF;
+ block->addr = BLKADDR_NPA;
+ block->type = BLKTYPE_NPA;
+ block->lfshift = 8;
+ block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
+ block->lfcfg_reg = NPA_PRIV_LFX_CFG;
+ block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NPA_AF_LF_RST;
+ sprintf(block->name, "NPA");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+nix:
+ /* Init NIX LF's bitmap */
+ block = &hw->block[BLKADDR_NIX0];
+ if (!block->implemented)
+ goto sso;
+ cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = BLKADDR_NIX0;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ sprintf(block->name, "NIX");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+sso:
+ /* Init SSO group's bitmap */
+ block = &hw->block[BLKADDR_SSO];
+ if (!block->implemented)
+ goto ssow;
+ cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_SSO;
+ block->type = BLKTYPE_SSO;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
+ block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
+ block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
+ block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ sprintf(block->name, "SSO GROUP");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+ssow:
+ /* Init SSO workslot's bitmap */
+ block = &hw->block[BLKADDR_SSOW];
+ if (!block->implemented)
+ goto tim;
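+ /* Workslot count comes from SSO_AF_CONST (bits 63:56), read in the SSO init above */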
+ block->lf.max = (cfg >> 56) & 0xFF;
+ block->addr = BLKADDR_SSOW;
+ block->type = BLKTYPE_SSOW;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
+ block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
+ block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
+ block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ sprintf(block->name, "SSOWS");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+tim:
+ /* Init TIM LF's bitmap */
+ block = &hw->block[BLKADDR_TIM];
+ if (!block->implemented)
+ goto cpt;
+ cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_TIM;
+ block->type = BLKTYPE_TIM;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
+ block->lfcfg_reg = TIM_PRIV_LFX_CFG;
+ block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = TIM_AF_LF_RST;
+ sprintf(block->name, "TIM");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+cpt:
+ /* Init CPT LF's bitmap */
+ block = &hw->block[BLKADDR_CPT0];
+ if (!block->implemented)
+ goto init;
+ cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = BLKADDR_CPT0;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ sprintf(block->name, "CPT");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+init:
+ /* Allocate memory for PFVF data */
+ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->pf)
+ return -ENOMEM;
+
+ rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->hwvf)
+ return -ENOMEM;
+
+ spin_lock_init(&rvu->rsrc_lock);
+
+ err = rvu_setup_msix_resources(rvu);
+ if (err)
+ return err;
+
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+
+ /* Allocate memory for block LF/slot to pcifunc mapping info */
+ block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!block->fn_map)
+ return -ENOMEM;
+
+ /* Scan the block to check if low level firmware has
+ * already provisioned any of its LFs to a PF/VF.
+ */
+ rvu_scan_block(rvu, block);
+ }
+
+ return 0;
+}
+
+static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
+{
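+ /* Nothing to do; the response itself signals the PF that AF is alive */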
+ return 0;
+}
+
+/* Get current count of a RVU block's LF/slots
+ * provisioned to a given RVU func.
+ */
+static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+{
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKTYPE_NIX:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKTYPE_SSO:
+ return pfvf->sso;
+ case BLKTYPE_SSOW:
+ return pfvf->ssow;
+ case BLKTYPE_TIM:
+ return pfvf->timlfs;
+ case BLKTYPE_CPT:
+ return pfvf->cptlfs;
+ }
+ return 0;
+}
+
+static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ int pcifunc, int slot)
+{
+ int timeout = 10000;
+ u64 val;
+
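+ /* Bit 13 triggers the lookup; HW clears it when the result is ready */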
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+ /* Wait for the lookup to finish; bound the wait so a wedged block
+ * can't hang us. Callers may hold rsrc_lock, so spin, don't sleep.
+ */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) {
+ if (!--timeout)
+ return -1;
+ cpu_relax();
+ }
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ if (!num_lfs)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
+ if (lf < 0) /* This should never happen */
+ continue;
+
+ /* Disable the LF */
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), 0x00ULL);
+
+ /* Update SW maintained mapping info as well */
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, false);
+
+ /* Free the resource */
+ rvu_free_rsrc(&block->lf, lf);
+
+ /* Clear MSIX vector offset for this LF */
+ rvu_clear_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
+ u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ bool detach_all = true;
+ struct rvu_block *block;
+ int blkid;
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check for partial resource detach */
+ if (detach && detach->partial)
+ detach_all = false;
+
+ /* Check for RVU block's LFs attached to this func,
+ * if so, detach them.
+ */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+ if (!detach_all && detach) {
+ if (blkid == BLKADDR_NPA && !detach->npalf)
+ continue;
+ else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_SSO) && !detach->sso)
+ continue;
+ else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
+ continue;
+ else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
+ continue;
+ }
+ rvu_detach_block(rvu, pcifunc, block->type);
+ }
+
+ spin_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
+{
+ return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc,
+ int blktype, int num_lfs)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf;
+ int blkaddr;
+ u64 cfg;
+
+ if (!num_lfs)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ /* Allocate the resource */
+ lf = rvu_alloc_rsrc(&block->lf);
+ if (lf < 0)
+ return;
+
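+ /* Bit 63 marks the LF as attached; owner pcifunc and slot follow */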
+ cfg = (1ULL << 63) | (pcifunc << 8) | slot;
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), cfg);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_check_rsrc_availability(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int free_lfs, mappedlfs;
+
+ /* Only one NPA LF can be attached */
+ if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ block = &hw->block[BLKADDR_NPA];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->npalf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NPA\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
+ block = &hw->block[BLKADDR_NIX0];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->nixlf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NIX\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ /* Is the request within limits? */
+ if (req->sso > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSO req, %d > max %d\n",
+ pcifunc, req->sso, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ /* Check if additional resources are available */
+ if (req->sso > mappedlfs &&
+ ((req->sso - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ if (req->ssow > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSOW req, %d > max %d\n",
+ pcifunc, req->ssow, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->ssow > mappedlfs &&
+ ((req->ssow - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ if (req->timlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+ pcifunc, req->timlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->timlfs > mappedlfs &&
+ ((req->timlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->cptlfs) {
+ block = &hw->block[BLKADDR_CPT0];
+ if (req->cptlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+ pcifunc, req->cptlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->cptlfs > mappedlfs &&
+ ((req->cptlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
+static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = attach->hdr.pcifunc;
+ int err;
+
+ /* If first request, detach all existing attached resources */
+ if (!attach->modify)
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check if the request can be accommodated */
+ err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+ if (err)
+ goto exit;
+
+ /* Now attach the requested resources */
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
+ if (attach->nixlf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+
+ if (attach->sso) {
+ /* An RVU func doesn't know which exact LF or slot is attached
+ * to it, it always sees them as slots 0,1,2... So for a 'modify'
+ * request, simply detach all existing attached LFs/slots and
+ * attach afresh.
+ */
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ }
+
+ if (attach->ssow) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ }
+
+ if (attach->timlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ }
+
+ if (attach->cptlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ }
+
+exit:
+ spin_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int lf)
+{
+ u16 vec;
+
+ if (lf < 0)
+ return MSIX_VECTOR_INVALID;
+
+ for (vec = 0; vec < pfvf->msix.max; vec++) {
+ if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
+ return vec;
+ }
+ return MSIX_VECTOR_INVALID;
+}
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Check and alloc MSIX vectors, must be contiguous */
+ if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
+ return;
+
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+
+ /* Config MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
+
+ /* Update the bitmap as well */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
+}
+
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Clear MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), cfg & ~0x7FFULL);
+
+ offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+ if (offset == MSIX_VECTOR_INVALID)
+ return;
+
+ /* Update the mapping */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = 0;
+
+ /* Free the same in MSIX bitmap */
+ rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
+}
+
+static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int lf, slot;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->msix.bmap)
+ return 0;
+
+ /* Set MSIX offsets for each block's LFs attached to this PF/VF */
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
+ rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
+
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+
+ rsp->sso = pfvf->sso;
+ for (slot = 0; slot < rsp->sso; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
+ rsp->sso_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
+ }
+
+ rsp->ssow = pfvf->ssow;
+ for (slot = 0; slot < rsp->ssow; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
+ rsp->ssow_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
+ }
+
+ rsp->timlfs = pfvf->timlfs;
+ for (slot = 0; slot < rsp->timlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
+ rsp->timlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+ }
+
+ rsp->cptlfs = pfvf->cptlfs;
+ for (slot = 0; slot < rsp->cptlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+ rsp->cptlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+ }
+ return 0;
+}
+
+static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+ struct mbox_msghdr *req)
+{
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto bad_message;
+
+ switch (req->id) {
+#define M(_name, _id, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &rvu->mbox, devid, \
+ sizeof(struct _rsp_type)); \
+ if (rsp) { \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = req->pcifunc; \
+ rsp->hdr.rc = 0; \
+ } \
+ \
+ err = rvu_mbox_handler_ ## _name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
+ if (rsp && err) \
+ rsp->hdr.rc = err; \
+ \
+ return rsp ? err : -ENOMEM; \
+ }
+MBOX_MESSAGES
+#undef M
+ break;
+bad_message:
+ default:
+ otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
+ req->id);
+ return -ENODEV;
+ }
+}
+
+static void rvu_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, err;
+ u16 pf;
+
+ mbox = &rvu->mbox;
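+ /* Index of this work struct within mbox_wrk[] identifies the PF */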
+ pf = mwork - rvu->mbox_wrk;
+ mdev = &mbox->dev[pf];
+
+ /* Process received mbox messages */
+ req_hdr = mdev->mbase + mbox->rx_start;
+ if (req_hdr->num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ /* Set which PF sent this message based on mbox IRQ */
+ msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
+ err = rvu_process_mbox_msg(rvu, pf, msg);
+ if (!err) {
+ offset = mbox->rx_start + msg->next_msgoff;
+ continue;
+ }
+
+ if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf,
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ else
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf);
+ }
+
+ /* Send mbox responses to PF */
+ otx2_mbox_msg_send(mbox, pf);
+}
+
+static int rvu_mbox_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ void __iomem *hwbase = NULL;
+ struct rvu_work *mwork;
+ u64 bar4_addr;
+ int err, pf;
+
+ rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ hw->total_pfs);
+ if (!rvu->mbox_wq)
+ return -ENOMEM;
+
+ rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->mbox_wrk) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Map mbox region shared with PFs */
+ bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * RVU devices, so it shouldn't be mapped as device memory, to
+ * allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+ if (!hwbase) {
+ dev_err(rvu->dev, "Unable to map mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
+ MBOX_DIR_AFPF, hw->total_pfs);
+ if (err)
+ goto exit;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ mwork = &rvu->mbox_wrk[pf];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, rvu_mbox_handler);
+ }
+
+ return 0;
+exit:
+ if (hwbase)
+ iounmap((void __iomem *)hwbase);
+ destroy_workqueue(rvu->mbox_wq);
+ return err;
+}
+
+static void rvu_mbox_destroy(struct rvu *rvu)
+{
+ if (rvu->mbox_wq) {
+ flush_workqueue(rvu->mbox_wq);
+ destroy_workqueue(rvu->mbox_wq);
+ rvu->mbox_wq = NULL;
+ }
+
+ if (rvu->mbox.hwbase)
+ iounmap((void __iomem *)rvu->mbox.hwbase);
+
+ otx2_mbox_destroy(&rvu->mbox);
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+ /* Clear interrupts */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+
+ /* Sync with mbox memory region */
+ smp_wmb();
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ mbox = &rvu->mbox;
+ mdev = &mbox->dev[pf];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(rvu->mbox_wq,
+ &rvu->mbox_wrk[pf].work);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
+
+ /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+}
+
+static void rvu_unregister_interrupts(struct rvu *rvu)
+{
+ int irq;
+
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ for (irq = 0; irq < rvu->num_vec; irq++) {
+ if (rvu->irq_allocated[irq])
+ free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+ }
+
+ pci_free_irq_vectors(rvu->pdev);
+ rvu->num_vec = 0;
+}
+
+static int rvu_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ rvu->num_vec = pci_msix_vec_count(rvu->pdev);
+
+ rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
+ NAME_SIZE, GFP_KERNEL);
+ if (!rvu->irq_name)
+ return -ENOMEM;
+
+ rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
+ sizeof(bool), GFP_KERNEL);
+ if (!rvu->irq_allocated)
+ return -ENOMEM;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
+ rvu->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(rvu->dev,
+ "RVUAF: Request for %d msix vectors failed, ret %d\n",
+ rvu->num_vec, ret);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox irq\n");
+ goto fail;
+ }
+
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+
+ /* Enable mailbox interrupts from all PFs */
+ rvu_enable_mbox_intr(rvu);
+
+ return 0;
+
+fail:
+ pci_free_irq_vectors(rvu->pdev);
+ return ret;
+}
+
+static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rvu *rvu;
+ int err;
+
+ rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
+ if (!rvu)
+ return -ENOMEM;
+
+ rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
+ if (!rvu->hw) {
+ devm_kfree(dev, rvu);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, rvu);
+ rvu->pdev = pdev;
+ rvu->dev = &pdev->dev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_freemem;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set consistent DMA mask\n");
+ goto err_release_regions;
+ }
+
+ /* Map Admin function CSRs */
+ rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
+ rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!rvu->afreg_base || !rvu->pfreg_base) {
+ dev_err(dev, "Unable to map admin function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Check which blocks the HW supports */
+ rvu_check_block_implemented(rvu);
+
+ rvu_reset_all_blocks(rvu);
+
+ err = rvu_setup_hw_resources(rvu);
+ if (err)
+ goto err_release_regions;
+
+ err = rvu_mbox_init(rvu);
+ if (err)
+ goto err_hwsetup;
+
+ err = rvu_cgx_probe(rvu);
+ if (err)
+ goto err_mbox;
+
+ err = rvu_register_interrupts(rvu);
+ if (err)
+ goto err_cgx;
+
+ return 0;
+err_cgx:
+ rvu_cgx_wq_destroy(rvu);
+err_mbox:
+ rvu_mbox_destroy(rvu);
+err_hwsetup:
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_freemem:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(dev, rvu);
+ return err;
+}
+
+static void rvu_remove(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ rvu_unregister_interrupts(rvu);
+ rvu_cgx_wq_destroy(rvu);
+ rvu_mbox_destroy(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(&pdev->dev, rvu);
+}
+
+static struct pci_driver rvu_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_id_table,
+ .probe = rvu_probe,
+ .remove = rvu_remove,
+};
+
+static int __init rvu_init_module(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ err = pci_register_driver(&cgx_driver);
+ if (err < 0)
+ return err;
+
+ err = pci_register_driver(&rvu_driver);
+ if (err < 0)
+ pci_unregister_driver(&cgx_driver);
+
+ return err;
+}
+
+static void __exit rvu_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&cgx_driver);
+}
+
+module_init(rvu_init_module);
+module_exit(rvu_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
new file mode 100644
index 000000000000..d169fa9eb45e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_H
+#define RVU_H
+
+#include "rvu_struct.h"
+#include "mbox.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_PF_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+/* PF_FUNC */
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
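+
+/* pcifunc layout: PF number in bits 15:10,
+ * FUNC (VF id + 1, or 0 for the PF itself) in bits 9:0.
+ */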
+
+struct rvu_work {
+ struct work_struct work;
+ struct rvu *rvu;
+};
+
+struct rsrc_bmap {
+ unsigned long *bmap; /* Pointer to resource bitmap */
+ u16 max; /* Max resource id or count */
+};
+
+struct rvu_block {
+ struct rsrc_bmap lf;
+ u16 *fn_map; /* LF to pcifunc mapping */
+ bool multislot;
+ bool implemented;
+ u8 addr; /* RVU_BLOCK_ADDR_E */
+ u8 type; /* RVU_BLOCK_TYPE_E */
+ u8 lfshift;
+ u64 lookup_reg;
+ u64 pf_lfcnt_reg;
+ u64 vf_lfcnt_reg;
+ u64 lfcfg_reg;
+ u64 msixcfg_reg;
+ u64 lfreset_reg;
+ unsigned char name[NAME_SIZE];
+};
+
+/* Structure for per RVU func info, i.e. PF/VF */
+struct rvu_pfvf {
+ bool npalf; /* Only one NPALF per RVU_FUNC */
+ bool nixlf; /* Only one NIXLF per RVU_FUNC */
+ u16 sso;
+ u16 ssow;
+ u16 cptlfs;
+ u16 timlfs;
+
+ /* Block LF's MSIX vector info */
+ struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
+#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
+ u16 *msix_lfmap; /* Vector to block LF mapping */
+};
+
+struct rvu_hwinfo {
+ u8 total_pfs; /* MAX RVU PFs HW supports */
+ u16 total_vfs; /* Max RVU VFs HW supports */
+ u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
+
+ struct rvu_block block[BLK_COUNT]; /* Block info */
+};
+
+struct rvu {
+ void __iomem *afreg_base;
+ void __iomem *pfreg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct rvu_hwinfo *hw;
+ struct rvu_pfvf *pf;
+ struct rvu_pfvf *hwvf;
+ spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+
+ /* Mbox */
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+ struct workqueue_struct *mbox_wq;
+
+ /* MSI-X */
+ u16 num_vec;
+ char *irq_name;
+ bool *irq_allocated;
+ dma_addr_t msix_base_iova;
+
+ /* CGX */
+#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for the RVU AF */
+ u8 cgx_mapped_pfs;
+ u8 cgx_cnt; /* available cgx ports */
+ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
+ u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ * every cgx lmac port
+ */
+ void **cgx_idmap; /* cgx id to cgx data map table */
+ struct work_struct cgx_evh_work;
+ struct workqueue_struct *cgx_evh_wq;
+ spinlock_t cgx_evq_lock; /* cgx event queue lock */
+ struct list_head cgx_evq_head; /* cgx event queue head */
+};
+
+static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq(rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
+{
+ writeq(val, rvu->pfreg_base + offset);
+}
+
+static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
+{
+ return readq(rvu->pfreg_base + offset);
+}
+
+/* Function Prototypes
+ * RVU
+ */
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+int rvu_get_pf(u16 pcifunc);
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+
+/* CGX APIs */
+int rvu_cgx_probe(struct rvu *rvu);
+void rvu_cgx_wq_destroy(struct rvu *rvu);
+#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
new file mode 100644
index 000000000000..5ecc22308b22
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "cgx.h"
+
+struct cgx_evq_entry {
+ struct list_head evq_node;
+ struct cgx_link_event link_event;
+};
+
+static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+{
+ return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
+}
+
+static void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+{
+ if (cgx_id >= rvu->cgx_cnt)
+ return NULL;
+
+ return rvu->cgx_idmap[cgx_id];
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+ int cgx_cnt = rvu->cgx_cnt;
+ int cgx, lmac_cnt, lmac;
+ int pf = PF_CGXMAP_BASE;
+ int size;
+
+ if (!cgx_cnt)
+ return 0;
+
+ if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ return -EINVAL;
+
+ /* Alloc map table
+ * An additional entry is required since PF id starts from 1 and
+ * hence entry at offset 0 is invalid.
+ */
+ size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ if (!rvu->pf2cgxlmac_map)
+ return -ENOMEM;
+
+ /* Initialize offset 0 with an invalid cgx and lmac id */
+ rvu->pf2cgxlmac_map[0] = 0xFF;
+
+ /* Reverse map table */
+ rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
+ cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ GFP_KERNEL);
+ if (!rvu->cgxlmac2pf_map)
+ return -ENOMEM;
+
+ rvu->cgx_mapped_pfs = 0;
+ for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+ rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+ rvu->cgx_mapped_pfs++;
+ }
+ }
+ return 0;
+}
+
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+ struct cgx_evq_entry *qentry;
+ struct rvu *rvu = data;
+
+ /* post event to the event queue */
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+ qentry->link_event = *event;
+ spin_lock(&rvu->cgx_evq_lock);
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+ spin_unlock(&rvu->cgx_evq_lock);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+ struct cgx_evq_entry *qentry;
+ struct cgx_link_event *event;
+ unsigned long flags;
+
+ do {
+ /* Dequeue an event */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+ struct cgx_evq_entry,
+ evq_node);
+ if (qentry)
+ list_del(&qentry->evq_node);
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->link_event;
+
+ /* Do nothing for now */
+ kfree(qentry);
+ } while (1);
+}
+
+static void cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+ struct cgx_event_cb cb;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ spin_lock_init(&rvu->cgx_evq_lock);
+ INIT_LIST_HEAD(&rvu->cgx_evq_head);
+ INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ if (!rvu->cgx_evh_wq) {
+ dev_err(rvu->dev, "alloc workqueue failed\n");
+ return;
+ }
+
+ cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
+ cb.data = rvu;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+ if (err)
+ dev_err(rvu->dev,
+ "%d:%d handler register failed\n",
+ cgx, lmac);
+ }
+ }
+}
+
+void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->cgx_evh_wq) {
+ flush_workqueue(rvu->cgx_evh_wq);
+ destroy_workqueue(rvu->cgx_evh_wq);
+ rvu->cgx_evh_wq = NULL;
+ }
+}
+
+int rvu_cgx_probe(struct rvu *rvu)
+{
+ int i, err;
+
+ /* find available cgx ports */
+ rvu->cgx_cnt = cgx_get_cgx_cnt();
+ if (!rvu->cgx_cnt) {
+ dev_info(rvu->dev, "No CGX devices found!\n");
+ return -ENODEV;
+ }
+
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
+ GFP_KERNEL);
+ if (!rvu->cgx_idmap)
+ return -ENOMEM;
+
+ /* Initialize the cgxdata table */
+ for (i = 0; i < rvu->cgx_cnt; i++)
+ rvu->cgx_idmap[i] = cgx_get_pdata(i);
+
+ /* Map CGX LMAC interfaces to RVU PFs */
+ err = rvu_map_cgx_lmac_pf(rvu);
+ if (err)
+ return err;
+
+ /* Register for CGX events */
+ cgx_lmac_event_handler_init(rvu);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
new file mode 100644
index 000000000000..d871a394e72b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_REG_H
+#define RVU_REG_H
+
+/* Admin function registers */
+#define RVU_AF_MSIXTR_BASE (0x10)
+#define RVU_AF_ECO (0x20)
+#define RVU_AF_BLK_RST (0x30)
+#define RVU_AF_PF_BAR4_ADDR (0x40)
+#define RVU_AF_RAS (0x100)
+#define RVU_AF_RAS_W1S (0x108)
+#define RVU_AF_RAS_ENA_W1S (0x110)
+#define RVU_AF_RAS_ENA_W1C (0x118)
+#define RVU_AF_GEN_INT (0x120)
+#define RVU_AF_GEN_INT_W1S (0x128)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138)
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800)
+#define RVU_AF_PFTRPEND (0x2810)
+#define RVU_AF_PFTRPEND_W1S (0x2820)
+#define RVU_AF_PF_RST (0x2840)
+#define RVU_AF_HWVF_RST (0x2850)
+#define RVU_AF_PFAF_MBOX_INT (0x2880)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898)
+#define RVU_AF_PFFLR_INT (0x28a0)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8)
+#define RVU_AF_PFME_INT (0x28c0)
+#define RVU_AF_PFME_INT_W1S (0x28c8)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+
+/* Admin function's privileged PF/VF registers */
+#define RVU_PRIV_CONST (0x8000000)
+#define RVU_PRIV_GEN_CFG (0x8000010)
+#define RVU_PRIV_CLK_CFG (0x8000020)
+#define RVU_PRIV_ACTIVE_PC (0x8000030)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
+#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
+#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
+#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
+#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
+#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
+#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
+#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
+#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
+#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
+#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+
+/* NPA block's admin function registers */
+#define NPA_AF_BLK_RST (0x0000)
+#define NPA_AF_CONST (0x0010)
+#define NPA_AF_CONST1 (0x0018)
+#define NPA_AF_LF_RST (0x0020)
+#define NPA_AF_GEN_CFG (0x0030)
+#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_INP_CTL (0x00D0)
+#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
+#define NPA_AF_AVG_DELAY (0x0100)
+#define NPA_AF_GEN_INT (0x0140)
+#define NPA_AF_GEN_INT_W1S (0x0148)
+#define NPA_AF_GEN_INT_ENA_W1S (0x0150)
+#define NPA_AF_GEN_INT_ENA_W1C (0x0158)
+#define NPA_AF_RVU_INT (0x0160)
+#define NPA_AF_RVU_INT_W1S (0x0168)
+#define NPA_AF_RVU_INT_ENA_W1S (0x0170)
+#define NPA_AF_RVU_INT_ENA_W1C (0x0178)
+#define NPA_AF_ERR_INT (0x0180)
+#define NPA_AF_ERR_INT_W1S (0x0188)
+#define NPA_AF_ERR_INT_ENA_W1S (0x0190)
+#define NPA_AF_ERR_INT_ENA_W1C (0x0198)
+#define NPA_AF_RAS (0x01A0)
+#define NPA_AF_RAS_W1S (0x01A8)
+#define NPA_AF_RAS_ENA_W1S (0x01B0)
+#define NPA_AF_RAS_ENA_W1C (0x01B8)
+#define NPA_AF_BP_TEST (0x0200)
+#define NPA_AF_ECO (0x0300)
+#define NPA_AF_AQ_CFG (0x0600)
+#define NPA_AF_AQ_BASE (0x0610)
+#define NPA_AF_AQ_STATUS (0x0620)
+#define NPA_AF_AQ_DOOR (0x0630)
+#define NPA_AF_AQ_DONE_WAIT (0x0640)
+#define NPA_AF_AQ_DONE (0x0650)
+#define NPA_AF_AQ_DONE_ACK (0x0660)
+#define NPA_AF_AQ_DONE_INT (0x0680)
+#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000)
+#define NPA_PRIV_LFX_CFG (0x10010)
+#define NPA_PRIV_LFX_INT_CFG (0x10020)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+
+/* NIX block's admin function registers */
+#define NIX_AF_CFG (0x0000)
+#define NIX_AF_STATUS (0x0010)
+#define NIX_AF_NDC_CFG (0x0018)
+#define NIX_AF_CONST (0x0020)
+#define NIX_AF_CONST1 (0x0028)
+#define NIX_AF_CONST2 (0x0030)
+#define NIX_AF_CONST3 (0x0038)
+#define NIX_AF_SQ_CONST (0x0040)
+#define NIX_AF_CQ_CONST (0x0048)
+#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PSE_CONST (0x0060)
+#define NIX_AF_TL1_CONST (0x0070)
+#define NIX_AF_TL2_CONST (0x0078)
+#define NIX_AF_TL3_CONST (0x0080)
+#define NIX_AF_TL4_CONST (0x0088)
+#define NIX_AF_MDQ_CONST (0x0090)
+#define NIX_AF_MC_MIRROR_CONST (0x0098)
+#define NIX_AF_LSO_CFG (0x00A8)
+#define NIX_AF_BLK_RST (0x00B0)
+#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_RX_CFG (0x00D0)
+#define NIX_AF_AVG_DELAY (0x00E0)
+#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_RX_MCAST_BASE (0x0100)
+#define NIX_AF_RX_MCAST_CFG (0x0110)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x0130)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148)
+#define NIX_AF_LF_RST (0x0150)
+#define NIX_AF_GEN_INT (0x0160)
+#define NIX_AF_GEN_INT_W1S (0x0168)
+#define NIX_AF_GEN_INT_ENA_W1S (0x0170)
+#define NIX_AF_GEN_INT_ENA_W1C (0x0178)
+#define NIX_AF_ERR_INT (0x0180)
+#define NIX_AF_ERR_INT_W1S (0x0188)
+#define NIX_AF_ERR_INT_ENA_W1S (0x0190)
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198)
+#define NIX_AF_RAS (0x01A0)
+#define NIX_AF_RAS_W1S (0x01A8)
+#define NIX_AF_RAS_ENA_W1S (0x01B0)
+#define NIX_AF_RAS_ENA_W1C (0x01B8)
+#define NIX_AF_RVU_INT (0x01C0)
+#define NIX_AF_RVU_INT_W1S (0x01C8)
+#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
+#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
+#define NIX_AF_TCP_TIMER (0x01E0)
+#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_OL2 (0x0200)
+#define NIX_AF_RX_DEF_OIP4 (0x0210)
+#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_IIP6 (0x0240)
+#define NIX_AF_RX_DEF_OTCP (0x0250)
+#define NIX_AF_RX_DEF_ITCP (0x0260)
+#define NIX_AF_RX_DEF_OUDP (0x0270)
+#define NIX_AF_RX_DEF_IUDP (0x0280)
+#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_ISCTP (0x02A0)
+#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
+#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_NDC_TX_SYNC (0x03F0)
+#define NIX_AF_AQ_CFG (0x0400)
+#define NIX_AF_AQ_BASE (0x0410)
+#define NIX_AF_AQ_STATUS (0x0420)
+#define NIX_AF_AQ_DOOR (0x0430)
+#define NIX_AF_AQ_DONE_WAIT (0x0440)
+#define NIX_AF_AQ_DONE (0x0450)
+#define NIX_AF_AQ_DONE_ACK (0x0460)
+#define NIX_AF_AQ_DONE_TIMER (0x0470)
+#define NIX_AF_AQ_DONE_INT (0x0480)
+#define NIX_AF_AQ_DONE_INT_W1S (0x0488)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x0490)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x0498)
+#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500)
+#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510)
+#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520)
+#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530)
+#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x0550)
+#define NIX_AF_RX_SW_SYNC_DONE (0x0560)
+#define NIX_AF_SEB_ECO (0x0600)
+#define NIX_AF_SEB_TEST_BP (0x0610)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+
+#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
+#define NIX_AF_PSE_SHAPER_CFG (0x810)
+#define NIX_AF_TX_EXPR_CREDIT (0x830)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16)
+#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16)
+#define NIX_AF_TL1A_DEBUG (0xce0)
+#define NIX_AF_TL1B_DEBUG (0xcf0)
+#define NIX_AF_TL1_DEBUG_GREEN (0xd00)
+#define NIX_AF_TL1_DEBUG_NODE (0xd10)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16)
+#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16)
+#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16)
+#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16)
+#define NIX_AF_TL2A_DEBUG (0xee0)
+#define NIX_AF_TL2B_DEBUG (0xef0)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16)
+#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16)
+#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16)
+#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16)
+#define NIX_AF_TL3A_DEBUG (0x10e0)
+#define NIX_AF_TL3B_DEBUG (0x10f0)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16)
+#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16)
+#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16)
+#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16)
+#define NIX_AF_TL4A_DEBUG (0x12e0)
+#define NIX_AF_TL4B_DEBUG (0x12f0)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16)
+#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16)
+#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16)
+#define NIX_AF_MDQA_DEBUG (0x14e0)
+#define NIX_AF_MDQB_DEBUG (0x14f0)
+#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3)
+#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15)
+#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3)
+#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+
+#define NIX_PRIV_AF_INT_CFG (0x8000000)
+#define NIX_PRIV_LFX_CFG (0x8000010)
+#define NIX_PRIV_LFX_INT_CFG (0x8000020)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+
+/* SSO */
+#define SSO_AF_CONST (0x1000)
+#define SSO_AF_CONST1 (0x1008)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
+#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+
+/* SSOW */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
+#define SSOW_AF_LF_HWS_RST (0x0030)
+#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
+#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG (0x20000)
+#define TIM_PRIV_LFX_INT_CFG (0x24000)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+
+/* CPT */
+#define CPT_AF_CONSTANTS0 (0x0000)
+#define CPT_PRIV_LFX_CFG (0x41000)
+#define CPT_PRIV_LFX_INT_CFG (0x43000)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
+#define CPT_AF_LF_RST (0x44000)
+#define CPT_AF_BLK_RST (0x46000)
+
+#define NDC_AF_BLK_RST (0x002F0)
+#define NPC_AF_BLK_RST (0x00040)
+
+#endif /* RVU_REG_H */
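
The indexed macros above fold the instance number into high bits of the
offset; NIX_AF_TL1X_SCHEDULE(3), for example, evaluates to
0xC00 | (3 << 16) = 0x30C00. A minimal sketch of reading one such
register, assuming a rvu_read64() helper from the driver core (not part
of this header) that applies the NIX block's base address:

/* Sketch: read one TL1 scheduler queue's SCHEDULE register. */
static u64 example_read_tl1_schedule(struct rvu *rvu, int blkaddr, int schq)
{
	/* NIX_AF_TL1X_SCHEDULE(3) == 0xC00 | (3 << 16) == 0x30C00 */
	return rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq));
}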
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
new file mode 100644
index 000000000000..92e05817ffe7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_STRUCT_H
+#define RVU_STRUCT_H
+
+/* RVU Block Address Enumeration */
+enum rvu_block_addr_e {
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC0 = 0xcULL,
+ BLKADDR_NDC1 = 0xdULL,
+ BLKADDR_NDC2 = 0xeULL,
+ BLK_COUNT = 0xfULL,
+};
+
+/* RVU Block Type Enumeration */
+enum rvu_block_type_e {
+ BLKTYPE_RVUM = 0x0,
+ BLKTYPE_MSIX = 0x1,
+ BLKTYPE_LMT = 0x2,
+ BLKTYPE_NIX = 0x3,
+ BLKTYPE_NPA = 0x4,
+ BLKTYPE_NPC = 0x5,
+ BLKTYPE_SSO = 0x6,
+ BLKTYPE_SSOW = 0x7,
+ BLKTYPE_TIM = 0x8,
+ BLKTYPE_CPT = 0x9,
+ BLKTYPE_NDC = 0xa,
+ BLKTYPE_MAX = 0xa,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_int_vec_e {
+ RVU_AF_INT_VEC_POISON = 0x0,
+ RVU_AF_INT_VEC_PFFLR = 0x1,
+ RVU_AF_INT_VEC_PFME = 0x2,
+ RVU_AF_INT_VEC_GEN = 0x3,
+ RVU_AF_INT_VEC_MBOX = 0x4,
+ RVU_AF_INT_VEC_CNT = 0x5,
+};
+
+/* RVU PF Interrupt Vector Enumeration */
+enum rvu_pf_int_vec_e {
+ RVU_PF_INT_VEC_VFFLR0 = 0x0,
+ RVU_PF_INT_VEC_VFFLR1 = 0x1,
+ RVU_PF_INT_VEC_VFME0 = 0x2,
+ RVU_PF_INT_VEC_VFME1 = 0x3,
+ RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_PF_INT_VEC_AFPF_MBOX = 0x6,
+ RVU_PF_INT_VEC_CNT = 0x7,
+};
+
+#endif /* RVU_STRUCT_H */
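
Block addresses and block types are distinct namespaces: BLKADDR_NIX0 and
BLKADDR_NIX1 are two instances of a single BLKTYPE_NIX, and likewise for
CPT and NDC. A hedged sketch of the mapping, using only the enumerations
above (the helper name is illustrative):

/* Sketch: derive a block's type from its address. */
static enum rvu_block_type_e example_block_type(enum rvu_block_addr_e addr)
{
	switch (addr) {
	case BLKADDR_NPA:
		return BLKTYPE_NPA;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return BLKTYPE_NIX;
	case BLKADDR_CPT0:
	case BLKADDR_CPT1:
		return BLKTYPE_CPT;
	case BLKADDR_NDC0:
	case BLKADDR_NDC1:
	case BLKADDR_NDC2:
		return BLKTYPE_NDC;
	default:
		return BLKTYPE_MAX;
	}
}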
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d25e16d2c319..109472d6b61f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -167,8 +167,13 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].rx_ppp = pfcrx;
params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
- params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
- params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ if (mlx4_low_memory_profile()) {
+ params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE;
+ } else {
+ params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ }
params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d2d59444f562..6a046030e873 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
NULL, NULL, NULL),
};
-static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
-{
- struct mlx4_priv *priv = devlink_priv(devlink);
- struct mlx4_dev *dev = &priv->dev;
- int err;
-
- err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
- if (err)
- mlx4_warn(dev,
- "devlink set parameter %u value failed (err = %d)",
- param_id, err);
-}
-
static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
value.vu32 = 1UL << log_num_mac;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = enable_64b_cqe_eqe;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
value.vbool = enable_4k_uar;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
value.vbool = false;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c3228b89df46..485d856546c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -72,7 +72,7 @@
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
#define DEF_RX_RINGS 16
#define MAX_RX_RINGS 128
-#define MIN_RX_RINGS 4
+#define MIN_RX_RINGS 1
#define LOG_TXBB_SIZE 6
#define TXBB_SIZE BIT(LOG_TXBB_SIZE)
#define HEADROOM (2048 / TXBB_SIZE + 1)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 1d743bd5d212..d7fbd5b6ac95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -173,6 +173,7 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
}
}
+/* Use this function to get max num channels (rxqs/txqs) only when creating the netdev */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
@@ -181,6 +182,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
+/* Use this function to get max num channels after the netdev was created */
+static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
+{
+ return min_t(unsigned int, netdev->num_rx_queues,
+ netdev->num_tx_queues);
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
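
Because the netdev is allocated with nch * max_tc TX queues and nch RX
queues, min_t(num_rx_queues, num_tx_queues) recovers nch without
consulting the device again (e.g. 16 RX queues vs 16 * 8 TX queues
yields 16). A sketch of the intended call pattern, with field names
taken from this patch:

/* Sketch: walk per-channel state sized by the queue counts that were
 * fixed at alloc_etherdev_mqs() time.
 */
static void example_reset_channel_stats(struct mlx5e_priv *priv)
{
	int ix, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);

	for (ix = 0; ix < max_nch; ix++)
		memset(&priv->channel_stats[ix], 0,
		       sizeof(priv->channel_stats[ix]));
}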
@@ -673,7 +681,7 @@ struct mlx5e_priv {
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
- struct delayed_work update_stats_work;
+ struct work_struct update_stats_work;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
@@ -698,7 +706,7 @@ struct mlx5e_priv {
};
struct mlx5e_profile {
- void (*init)(struct mlx5_core_dev *mdev,
+ int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile, void *ppriv);
void (*cleanup)(struct mlx5e_priv *priv);
@@ -710,7 +718,6 @@ struct mlx5e_profile {
void (*disable)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
- int (*max_nch)(struct mlx5_core_dev *mdev);
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
@@ -926,8 +933,8 @@ int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_stats_work(struct work_struct *work);
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
@@ -962,9 +969,15 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
/* mlx5e generic netdev management API */
+int mlx5e_netdev_init(struct net_device *netdev,
+ struct mlx5e_priv *priv,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- void *ppriv);
+ int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c86fd770c463..c61f6302cde2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -319,7 +319,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
- ch->max_combined = priv->profile->max_nch(priv->mdev);
+ ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev);
ch->combined_count = priv->channels.params.num_channels;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bc034958c846..c9848e333450 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -272,10 +272,9 @@ static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
mlx5e_stats_grps[i].update_stats(priv);
}
-void mlx5e_update_stats_work(struct work_struct *work)
+static void mlx5e_update_stats_work(struct work_struct *work)
{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
@@ -283,6 +282,17 @@ void mlx5e_update_stats_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
+{
+ if (!priv->profile->update_stats)
+ return;
+
+ if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
+ return;
+
+ queue_work(priv->wq, &priv->update_stats_work);
+}
+
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
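
mlx5e_queue_update_stats() folds in two checks its callers used to
carry: a profile without update_stats() has nothing to queue, and the
MLX5E_STATE_DESTROYING test keeps new work off the queue once teardown
has begun. A hedged sketch of the teardown ordering this relies on
(helper name illustrative):

/* Sketch: set the DESTROYING bit before draining, so a concurrent
 * mlx5e_queue_update_stats() call becomes a no-op.
 */
static void example_stats_teardown(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	cancel_work_sync(&priv->update_stats_work);
}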
@@ -1789,7 +1799,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
struct mlx5e_priv *priv = c->priv;
- int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
+ int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
for (tc = 0; tc < params->num_tc; tc++) {
int txq_ix = c->ix + tc * max_nch;
@@ -2429,7 +2439,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
int err;
int ix;
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
rqt = &priv->direct_tir[ix].rqt;
err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
if (err)
@@ -2450,7 +2460,7 @@ void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
int i;
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++)
mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}
@@ -2544,7 +2554,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
}
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
struct mlx5e_redirect_rqt_param direct_rrp = {
.is_rss = false,
{
@@ -2745,7 +2755,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
goto free_in;
}
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
@@ -2845,7 +2855,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, tc;
for (i = 0; i < max_nch; i++)
@@ -2957,9 +2967,7 @@ int mlx5e_open_locked(struct net_device *netdev)
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- if (priv->profile->update_stats)
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ mlx5e_queue_update_stats(priv);
return 0;
err_clear_state_opened_flag:
@@ -3239,7 +3247,7 @@ err_destroy_inner_tirs:
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
- int nch = priv->profile->max_nch(priv->mdev);
+ int nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_tir *tir;
void *tirc;
int inlen;
@@ -3292,7 +3300,7 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
- int nch = priv->profile->max_nch(priv->mdev);
+ int nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
for (i = 0; i < nch; i++)
@@ -3441,7 +3449,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
/* update HW stats in background for next time */
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+ mlx5e_queue_update_stats(priv);
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
@@ -4560,33 +4568,6 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_build_rss_params(params);
}
-static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
- priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_opened_tc = 1;
-
- mlx5e_build_nic_params(mdev, &priv->channels.params,
- profile->max_nch(mdev), netdev->mtu);
-
- mutex_init(&priv->state_lock);
-
- INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
- INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
- INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
-
- mlx5e_timestamp_init(priv);
-}
-
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4749,15 +4730,23 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}
-static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
+
+ mlx5e_build_nic_params(mdev, &priv->channels.params,
+ mlx5e_get_netdev_max_channels(netdev), netdev->mtu);
+
+ mlx5e_timestamp_init(priv);
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4766,12 +4755,15 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
+
+ return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
+ mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4934,7 +4926,6 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
.update_stats = mlx5e_update_ndo_stats,
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
@@ -4943,13 +4934,53 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
/* mlx5e generic netdev management API (move to en_common.c) */
+/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
+int mlx5e_netdev_init(struct net_device *netdev,
+ struct mlx5e_priv *priv,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ /* priv init */
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+ priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_opened_tc = 1;
+
+ mutex_init(&priv->state_lock);
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
+ INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ return -ENOMEM;
+
+ /* netdev init */
+ netif_carrier_off(netdev);
+
+#ifdef CONFIG_MLX5_EN_ARFS
+ netdev->rx_cpu_rmap = mdev->rmap;
+#endif
+
+ return 0;
+}
+
+void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
+{
+ destroy_workqueue(priv->wq);
+}
+
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile,
+ int nch,
void *ppriv)
{
- int nch = profile->max_nch(mdev);
struct net_device *netdev;
- struct mlx5e_priv *priv;
+ int err;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
@@ -4959,25 +4990,15 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
-#ifdef CONFIG_MLX5_EN_ARFS
- netdev->rx_cpu_rmap = mdev->rmap;
-#endif
-
- profile->init(mdev, netdev, profile, ppriv);
-
- netif_carrier_off(netdev);
-
- priv = netdev_priv(netdev);
-
- priv->wq = create_singlethread_workqueue("mlx5e");
- if (!priv->wq)
- goto err_cleanup_nic;
+ err = profile->init(mdev, netdev, profile, ppriv);
+ if (err) {
+ mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
+ goto err_free_netdev;
+ }
return netdev;
-err_cleanup_nic:
- if (profile->cleanup)
- profile->cleanup(priv);
+err_free_netdev:
free_netdev(netdev);
return NULL;
@@ -5023,7 +5044,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
- cancel_delayed_work_sync(&priv->update_stats_work);
+ cancel_work_sync(&priv->update_stats_work);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
@@ -5031,7 +5052,6 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
free_netdev(netdev);
@@ -5080,6 +5100,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
void *rpriv = NULL;
void *priv;
int err;
+ int nch;
err = mlx5e_check_required_hca_cap(mdev);
if (err)
@@ -5095,7 +5116,8 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
}
#endif
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
+ nch = mlx5e_get_max_num_channels(mdev);
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
goto err_free_rpriv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 9264c3332aa6..64c2b9ea8b1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -992,8 +992,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_priv *priv = netdev_priv(dev);
/* update HW stats in background for next time */
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ mlx5e_queue_update_stats(priv);
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
@@ -1078,28 +1077,33 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}
-static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
-
- mutex_init(&priv->state_lock);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
- priv->channels.params.num_channels = 1;
+ priv->channels.params.num_channels =
+ mlx5e_get_netdev_max_channels(netdev);
mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
+
+ return 0;
+}
+
+static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
+{
+ mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -1224,12 +1228,12 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
static const struct mlx5e_profile mlx5e_rep_profile = {
.init = mlx5e_init_rep,
+ .cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_rep_rx,
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_hw_counters,
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
@@ -1292,13 +1296,14 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
struct mlx5e_priv *upriv;
- int err;
+ int nch, err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
if (!rpriv)
return -ENOMEM;
- netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
+ nch = mlx5e_get_max_num_channels(dev);
+ netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b7d4896c7c7b..1c006869a642 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -133,7 +133,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
@@ -1217,7 +1217,7 @@ static const struct counter_desc ch_stats_desc[] = {
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
@@ -1229,7 +1229,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -1264,7 +1264,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, j, tc;
for (i = 0; i < max_nch; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 299e2a897f7e..b59953daf8b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -71,27 +71,25 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+int mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
u16 max_mtu;
+ int err;
- /* priv init */
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
- priv->max_opened_tc = 1;
- mutex_init(&priv->state_lock);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->mtu = max_mtu;
mlx5e_build_nic_params(mdev, &priv->channels.params,
- profile->max_nch(mdev), netdev->mtu);
+ mlx5e_get_netdev_max_channels(netdev),
+ netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -108,20 +106,23 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+
+ return 0;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
-static void mlx5i_cleanup(struct mlx5e_priv *priv)
+void mlx5i_cleanup(struct mlx5e_priv *priv)
{
- /* Do nothing .. */
+ mlx5e_netdev_cleanup(priv->netdev, priv);
}
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_sw_stats s = { 0 };
int i, j;
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+ for (i = 0; i < max_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
@@ -418,7 +419,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.enable = NULL, /* mlx5i_enable */
.disable = NULL, /* mlx5i_disable */
.update_stats = NULL, /* mlx5i_update_stats */
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
@@ -650,7 +650,6 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
- destroy_workqueue(priv->wq);
if (!ipriv->sub_interface) {
mlx5i_pkey_qpn_ht_cleanup(netdev);
@@ -658,58 +657,37 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
}
}
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
+static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
+{
+ return mdev->mlx5e_res.pdn != 0;
+}
+
+static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
- const struct mlx5e_profile *profile;
- struct net_device *netdev;
+ if (mlx5_is_sub_interface(mdev))
+ return mlx5i_pkey_get_profile();
+ return &mlx5i_nic_profile;
+}
+
+static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+ struct net_device *netdev, void *param)
+{
+ struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
+ const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
struct mlx5i_priv *ipriv;
struct mlx5e_priv *epriv;
struct rdma_netdev *rn;
- bool sub_interface;
- int nch;
int err;
- if (mlx5i_check_required_hca_cap(mdev)) {
- mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- /* TODO: Need to find a better way to check if child device*/
- sub_interface = (mdev->mlx5e_res.pdn != 0);
-
- if (sub_interface)
- profile = mlx5i_pkey_get_profile();
- else
- profile = &mlx5i_nic_profile;
-
- nch = profile->max_nch(mdev);
-
- netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
- name, NET_NAME_UNKNOWN,
- setup,
- nch * MLX5E_MAX_NUM_TC,
- nch);
- if (!netdev) {
- mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
- return NULL;
- }
-
ipriv = netdev_priv(netdev);
epriv = mlx5i_epriv(netdev);
- epriv->wq = create_singlethread_workqueue("mlx5i");
- if (!epriv->wq)
- goto err_free_netdev;
-
- ipriv->sub_interface = sub_interface;
+ ipriv->sub_interface = mlx5_is_sub_interface(mdev);
if (!ipriv->sub_interface) {
err = mlx5i_pkey_qpn_ht_init(netdev);
if (err) {
mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
- goto destroy_wq;
+ return err;
}
/* This should only be called once per mdev */
@@ -718,7 +696,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
goto destroy_ht;
}
- profile->init(mdev, netdev, profile, ipriv);
+ prof->init(mdev, netdev, prof, ipriv);
mlx5e_attach_netdev(epriv);
netif_carrier_off(netdev);
@@ -734,15 +712,35 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
netdev->priv_destructor = mlx5_rdma_netdev_free;
netdev->needs_free_netdev = 1;
- return netdev;
+ return 0;
destroy_ht:
mlx5i_pkey_qpn_ht_cleanup(netdev);
-destroy_wq:
- destroy_workqueue(epriv->wq);
-err_free_netdev:
- free_netdev(netdev);
+ return err;
+}
- return NULL;
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params)
+{
+ int nch;
+ int rc;
+
+ rc = mlx5i_check_required_hca_cap(mdev);
+ if (rc)
+ return rc;
+
+ nch = mlx5e_get_max_num_channels(mdev);
+
+ *params = (struct rdma_netdev_alloc_params){
+ .sizeof_priv = sizeof(struct mlx5i_priv) +
+ sizeof(struct mlx5e_priv),
+ .txqs = nch * MLX5E_MAX_NUM_TC,
+ .rxqs = nch,
+ .param = mdev,
+ .initialize_rdma_netdev = mlx5_rdma_setup_rn,
+ };
+
+ return 0;
}
-EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
+EXPORT_SYMBOL(mlx5_rdma_rn_get_params);
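
With the allocation moved out of the driver, a consumer is expected to
query the parameters, allocate the netdev with the advertised queue
counts, and only then run the initialize_rdma_netdev() callback. A
hedged sketch of that flow (the wrapper is illustrative; in-tree this
sequence belongs to the RDMA core):

/* Sketch: allocate an IPoIB-style netdev from the params above. */
static struct net_device *
example_alloc_rn(struct mlx5_core_dev *mdev, struct ib_device *ibdev,
		 u8 port_num, const char *name,
		 void (*setup)(struct net_device *))
{
	struct rdma_netdev_alloc_params params;
	struct net_device *netdev;
	int rc;

	rc = mlx5_rdma_rn_get_params(mdev, ibdev, &params);
	if (rc)
		return ERR_PTR(rc);

	netdev = alloc_netdev_mqs(params.sizeof_priv, name, NET_NAME_UNKNOWN,
				  setup, params.txqs, params.rxqs);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	rc = params.initialize_rdma_netdev(ibdev, port_num, netdev,
					   params.param);
	if (rc) {
		free_netdev(netdev);
		return ERR_PTR(rc);
	}
	return netdev;
}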
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 2e7fb829e1b0..5ef3ef0072b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -84,10 +84,11 @@ void mlx5i_dev_cleanup(struct net_device *dev);
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
-void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
+int mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+void mlx5i_cleanup(struct mlx5e_priv *priv);
/* Get child interface nic profile */
const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index e3e8a5f1ac9b..b491b8f5fd6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -275,14 +275,17 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5i_pkey_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ int err;
- mlx5i_init(mdev, netdev, profile, ppriv);
+ err = mlx5i_init(mdev, netdev, profile, ppriv);
+ if (err)
+ return err;
/* Override parent ndo */
netdev->netdev_ops = &mlx5i_pkey_netdev_ops;
@@ -292,12 +295,14 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
/* Use dummy rqs */
priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+
+ return 0;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
{
- /* Do nothing .. */
+ mlx5i_cleanup(priv);
}
static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
@@ -346,7 +351,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.enable = NULL,
.disable = NULL,
.update_stats = NULL,
- .max_nch = mlx5e_get_max_num_channels,
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 83f452b7ccbb..bb99f6d41fe0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -221,7 +221,7 @@ MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
/* pci_eqe_cqn
- * Completion Queue that triggeret this EQE.
+ * Completion Queue that triggered this EQE.
*/
MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index df81e0a1eb64..32cb6718bb17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type {
MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC,
};
/* reg_sfd_rec_type
@@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid);
}
+/* reg_sfd_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * destination IP.
+ * When protocol is IPv6, reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24,
+ 8, MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_tunnel_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfd_uc_tunnel_protocol {
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfd_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27,
+ 1, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 destination IP.
+ * When protocol is IPv6, pointer to the underlay IPv6 destination IP
+ * which is configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0,
+ 24, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action, u32 uip,
+ enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
+{
+ mlxsw_reg_sfd_rec_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
+ action);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
+ mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
+ mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
+}
+
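
A unicast-tunnel record is written through the same SFD mechanics as
the existing record types. A hedged usage sketch, assuming the
pre-existing SFD op/policy/action enums and the mlxsw_reg_write()
helper:

/* Sketch: program one static unicast FDB entry that tunnels to uip. */
static int example_sfd_uc_tunnel_write(struct mlxsw_core *core,
				       const char *mac, u16 fid, u32 uip)
{
	char sfd_pl[MLXSW_REG_SFD_LEN];

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
				     mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				     uip,
				     MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4);
	return mlxsw_reg_write(core, MLXSW_REG(sfd), sfd_pl);
}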
/* SFN - Switch FDB Notification Register
* -------------------------------------------
* The switch provides notifications on newly learned FDB entries and
@@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type {
MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
MLXSW_REG_SFDF_FLUSH_PER_LAG,
MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID,
};
/* reg_sfdf_flush_type
@@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type {
* 3 - All FID dynamic entries pointing to port are flushed.
* 4 - All dynamic entries pointing to LAG are flushed.
* 5 - All FID dynamic entries pointing to LAG are flushed.
+ * 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed.
+ * 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed, per FID.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
@@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20);
-static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
+/* reg_slcr_seed
+ * LAG seed value. The seed is the same for all ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32);
+
+static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
+ mlxsw_reg_slcr_seed_set(payload, seed);
}
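
Callers now supply a seed alongside the hash-field bitmap; hashing stays
consistent across ports because every port is packed with the same
value. A hedged sketch (seeding from a jhash of the base MAC is one
plausible choice, not mandated by the register):

/* Sketch: enable LAG hashing on source/destination MAC with a seed. */
static int example_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC, seed);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}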
/* SLCOR - Switch LAG Collector Register
@@ -8279,6 +8348,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
mlxsw_reg_mgpc_opcode_set(payload, opcode);
}
+/* MPRS - Monitoring Parsing State Register
+ * ----------------------------------------
+ * The MPRS register is used for setting up the parsing for hash,
+ * policy-engine and routing.
+ */
+#define MLXSW_REG_MPRS_ID 0x9083
+#define MLXSW_REG_MPRS_LEN 0x14
+
+MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN);
+
+/* reg_mprs_parsing_depth
+ * Minimum parsing depth.
+ * Need to enlarge parsing depth according to L3, MPLS, tunnels, ACL
+ * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16);
+
+/* reg_mprs_parsing_en
+ * Parsing enable.
+ * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and
+ * NVGRE. Default is enabled. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16);
+
+/* reg_mprs_vxlan_udp_dport
+ * VxLAN UDP destination port.
+ * Used for identifying VxLAN packets and for dport field in
+ * encapsulation. Default is 4789.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16);
+
+static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth,
+ u16 vxlan_udp_dport)
+{
+ MLXSW_REG_ZERO(mprs, payload);
+ mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth);
+ mlxsw_reg_mprs_parsing_en_set(payload, true);
+ mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport);
+}
+
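
mlxsw_reg_mprs_pack() always enables NVE parsing; the caller chooses the
parsing depth and the VxLAN destination port. A hedged sketch that grows
the parse window past the 96-byte default and programs the IANA VxLAN
port (the depth value is illustrative):

/* Sketch: deepen parsing for VxLAN decapsulation. */
static int example_nve_parsing_set(struct mlxsw_core *core)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mlxsw_reg_mprs_pack(mprs_pl, 128, 4789);
	return mlxsw_reg_write(core, MLXSW_REG(mprs), mprs_pl);
}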
+/* TNGCR - Tunneling NVE General Configuration Register
+ * ----------------------------------------------------
+ * The TNGCR register is used for setting up the NVE Tunneling configuration.
+ */
+#define MLXSW_REG_TNGCR_ID 0xA001
+#define MLXSW_REG_TNGCR_LEN 0x44
+
+MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN);
+
+enum mlxsw_reg_tngcr_type {
+ MLXSW_REG_TNGCR_TYPE_VXLAN,
+ MLXSW_REG_TNGCR_TYPE_VXLAN_GPE,
+ MLXSW_REG_TNGCR_TYPE_GENEVE,
+ MLXSW_REG_TNGCR_TYPE_NVGRE,
+};
+
+/* reg_tngcr_type
+ * Tunnel type for encapsulation and decapsulation. The types are mutually
+ * exclusive.
+ * Note: For Spectrum the NVE parsing must be enabled in MPRS.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4);
+
+/* reg_tngcr_nve_valid
+ * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1);
+
+/* reg_tngcr_nve_ttl_uc
+ * The TTL for NVE tunnel encapsulation underlay unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8);
+
+/* reg_tngcr_nve_ttl_mc
+ * The TTL for NVE tunnel encapsulation underlay multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8);
+
+enum {
+ /* Do not copy flow label. Calculate flow label using nve_flh. */
+ MLXSW_REG_TNGCR_FL_NO_COPY,
+ /* Copy flow label from inner packet if packet is IPv6 and
+ * encapsulation is by IPv6. Otherwise, calculate flow label using
+ * nve_flh.
+ */
+ MLXSW_REG_TNGCR_FL_COPY,
+};
+
+/* reg_tngcr_nve_flc
+ * For NVE tunnel encapsulation: Flow label copy from inner packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1);
+
+enum {
+ /* Flow label is static. In Spectrum this means '0'. Spectrum-2
+ * uses {nve_fl_prefix, nve_fl_suffix}.
+ */
+ MLXSW_REG_TNGCR_FL_NO_HASH,
+ /* 8 LSBs of the flow label are calculated from ECMP hash of the
+ * inner packet. 12 MSBs are configured by nve_fl_prefix.
+ */
+ MLXSW_REG_TNGCR_FL_HASH,
+};
+
+/* reg_tngcr_nve_flh
+ * NVE flow label hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1);
+
+/* reg_tngcr_nve_fl_prefix
+ * NVE flow label prefix. Constant 12 MSBs of the flow label.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12);
+
+/* reg_tngcr_nve_fl_suffix
+ * NVE flow label suffix. Constant 8 LSBs of the flow label.
+ * Reserved when nve_flh=1 and for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8);
+
+enum {
+ /* Source UDP port is fixed (default '0') */
+ MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH,
+ /* Source UDP port is calculated based on hash */
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH,
+};
+
+/* reg_tngcr_nve_udp_sport_type
+ * NVE UDP source port type.
+ * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2).
+ * When the source UDP port is calculated based on hash, the 8 LSBs
+ * are calculated from the hash and the 8 MSBs are configured by
+ * nve_udp_sport_prefix.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1);
+
+/* reg_tngcr_nve_udp_sport_prefix
+ * NVE UDP source port prefix. Constant 8 MSBs of the UDP source port.
+ * Reserved when NVE type is NVGRE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8);
+
+/* reg_tngcr_nve_group_size_mc
+ * The number of sequential linked lists of MC entries. The first linked
+ * list is configured by SFD.underlay_mc_ptr.
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8);
+
+/* reg_tngcr_nve_group_size_flood
+ * The number of sequential linked lists of flooding entries. The first
+ * linked list is configured by SFMR.nve_tunnel_flood_ptr.
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8);
+
+/* reg_tngcr_learn_enable
+ * During decapsulation, whether to learn from NVE port.
+ * Reserved when Spectrum-2. See TNPC.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1);
+
+/* reg_tngcr_underlay_virtual_router
+ * Underlay virtual router.
+ * Reserved when Spectrum-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16);
+
+/* reg_tngcr_underlay_rif
+ * Underlay ingress router interface. RIF type should be loopback generic.
+ * Reserved when Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16);
+
+/* reg_tngcr_usipv4
+ * Underlay source IPv4 address of the NVE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32);
+
+/* reg_tngcr_usipv6
+ * Underlay source IPv6 address of the NVE. For Spectrum, must not be
+ * modified under traffic of NVE tunneling encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16);
+
+static inline void mlxsw_reg_tngcr_pack(char *payload,
+ enum mlxsw_reg_tngcr_type type,
+ bool valid, u8 ttl)
+{
+ MLXSW_REG_ZERO(tngcr, payload);
+ mlxsw_reg_tngcr_type_set(payload, type);
+ mlxsw_reg_tngcr_nve_valid_set(payload, valid);
+ mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY);
+ mlxsw_reg_tngcr_nve_flh_set(payload, 0);
+ mlxsw_reg_tngcr_nve_udp_sport_type_set(payload,
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH);
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0);
+ mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1);
+ mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1);
+}
+
+/* TNUMT - Tunneling NVE Underlay Multicast Table Register
+ * -------------------------------------------------------
+ * The TNUMT register is for building the underlay MC table. It is used
+ * for MC, flooding and BC traffic into the NVE tunnel.
+ */
+#define MLXSW_REG_TNUMT_ID 0xA003
+#define MLXSW_REG_TNUMT_LEN 0x20
+
+MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN);
+
+enum mlxsw_reg_tnumt_record_type {
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
+ MLXSW_REG_TNUMT_RECORD_TYPE_LABEL,
+};
+
+/* reg_tnumt_record_type
+ * Record type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4);
+
+enum mlxsw_reg_tnumt_tunnel_port {
+ MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
+ MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_tnumt_tunnel_port
+ * Tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4);
+
+/* reg_tnumt_underlay_mc_ptr
+ * Index to the underlay multicast table.
+ * For Spectrum the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24);
+
+/* reg_tnumt_vnext
+ * The next_underlay_mc_ptr is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1);
+
+/* reg_tnumt_next_underlay_mc_ptr
+ * The next index to the underlay multicast table.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24);
+
+/* reg_tnumt_record_size
+ * Number of IP addresses in the record.
+ * Range is 1..cap_max_nve_mc_entries_ipv{4,6}
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3);
+
+/* reg_tnumt_udip
+ * The underlay IPv4 addresses. udip[i] is reserved if i >= size.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false);
+
+/* reg_tnumt_udip_ptr
+ * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if
+ * i >= size. The IPv6 addresses are configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_tnumt_pack(char *payload,
+ enum mlxsw_reg_tnumt_record_type type,
+ enum mlxsw_reg_tnumt_tunnel_port tport,
+ u32 underlay_mc_ptr, bool vnext,
+ u32 next_underlay_mc_ptr,
+ u8 record_size)
+{
+ MLXSW_REG_ZERO(tnumt, payload);
+ mlxsw_reg_tnumt_record_type_set(payload, type);
+ mlxsw_reg_tnumt_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr);
+ mlxsw_reg_tnumt_vnext_set(payload, vnext);
+ mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr);
+ mlxsw_reg_tnumt_record_size_set(payload, record_size);
+}
+
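
Records chain through vnext/next_underlay_mc_ptr, and the list head is
what SFD.underlay_mc_ptr or SFMR.nve_tunnel_flood_ptr points at. A
hedged sketch chaining two single-address IPv4 records (ptr0/ptr1 stand
in for allocator-provided KVD indices):

/* Sketch: build a two-record underlay multicast list. */
static int example_tnumt_chain(struct mlxsw_core *core, u32 ptr0, u32 ptr1,
			       u32 udip0, u32 udip1)
{
	char tnumt_pl[MLXSW_REG_TNUMT_LEN];
	int err;

	/* head record points at the tail via vnext */
	mlxsw_reg_tnumt_pack(tnumt_pl, MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
			     MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
			     ptr0, true, ptr1, 1);
	mlxsw_reg_tnumt_udip_set(tnumt_pl, 0, udip0);
	err = mlxsw_reg_write(core, MLXSW_REG(tnumt), tnumt_pl);
	if (err)
		return err;

	/* tail record terminates the list */
	mlxsw_reg_tnumt_pack(tnumt_pl, MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
			     MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
			     ptr1, false, 0, 1);
	mlxsw_reg_tnumt_udip_set(tnumt_pl, 0, udip1);
	return mlxsw_reg_write(core, MLXSW_REG(tnumt), tnumt_pl);
}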
+/* TNQCR - Tunneling NVE QoS Configuration Register
+ * ------------------------------------------------
+ * The TNQCR register configures how QoS is set in encapsulation into the
+ * underlay network.
+ */
+#define MLXSW_REG_TNQCR_ID 0xA010
+#define MLXSW_REG_TNQCR_LEN 0x0C
+
+MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN);
+
+/* reg_tnqcr_enc_set_dscp
+ * For encapsulation: How to set DSCP field:
+ * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay
+ * (outer) IP header. If there is no IP header, use TNQDR.dscp.
+ * 1 - Set the DSCP field as TNQDR.dscp
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1);
+
+static inline void mlxsw_reg_tnqcr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(tnqcr, payload);
+ mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0);
+}
+
+/* TNQDR - Tunneling NVE QoS Default Register
+ * ------------------------------------------
+ * The TNQDR register configures the default QoS settings for NVE
+ * encapsulation.
+ */
+#define MLXSW_REG_TNQDR_ID 0xA011
+#define MLXSW_REG_TNQDR_LEN 0x08
+
+MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);
+
+/* reg_tnqdr_local_port
+ * Local port number (receive port). CPU port is supported.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
+
+/* reg_tnqdr_dscp
+ * For encapsulation, the default DSCP.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);
+
+static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(tnqdr, payload);
+ mlxsw_reg_tnqdr_local_port_set(payload, local_port);
+ mlxsw_reg_tnqdr_dscp_set(payload, 0);
+}
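Editor's note: the two pack helpers above encode one policy between them --
TNQCR copies DSCP from the overlay header when one exists, and TNQDR supplies
the per-port fallback (0) for non-IP payloads. A sketch of applying both at
init time (the example_* name is hypothetical):

static int example_nve_qos_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
        char tnqcr_pl[MLXSW_REG_TNQCR_LEN];
        char tnqdr_pl[MLXSW_REG_TNQDR_LEN];
        int err;

        /* Copy DSCP from the overlay (inner) IP header when present. */
        mlxsw_reg_tnqcr_pack(tnqcr_pl);
        err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(tnqcr), tnqcr_pl);
        if (err)
                return err;

        /* Fall back to DSCP 0 on this port for non-IP payloads. */
        mlxsw_reg_tnqdr_pack(tnqdr_pl, local_port);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(tnqdr), tnqdr_pl);
}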
+
+/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNEEM register maps the ECN of the IP header arriving at the
+ * encapsulation point to the ECN used in the underlay network.
+ */
+#define MLXSW_REG_TNEEM_ID 0xA012
+#define MLXSW_REG_TNEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN);
+
+/* reg_tneem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tneem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tneem, payload);
+ mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn);
+}
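Editor's note: TNEEM entries are naturally written once per overlay ECN
codepoint. A hedged sketch of initializing the full mapping along RFC 6040
lines, assuming <net/inet_ecn.h>'s INET_ECN_encapsulate() for the codepoint
math (the loop and function name are illustrative, not this patch's code):

#include <net/inet_ecn.h>

static int example_nve_ecn_encap_init(struct mlxsw_core *mlxsw_core)
{
        char tneem_pl[MLXSW_REG_TNEEM_LEN];
        int i, err;

        /* RFC 6040: derive the outer (underlay) ECN from the inner
         * (overlay) ECN for each of the four codepoints.
         */
        for (i = 0; i < 4; i++) {
                u8 outer_ecn = INET_ECN_encapsulate(0, i);

                mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn);
                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(tneem),
                                      tneem_pl);
                if (err)
                        return err;
        }

        return 0;
}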
+
+/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNDEM register configures the ECN mapping and trap actions that
+ * are performed during decapsulation.
+ */
+#define MLXSW_REG_TNDEM_ID 0xA013
+#define MLXSW_REG_TNDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN);
+
+/* reg_tndem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tndem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tndem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet that goes
+ * out of the decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tndem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4);
+
+/* reg_tndem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 ecn, bool trap_en,
+ u16 trap_id)
+{
+ MLXSW_REG_ZERO(tndem, payload);
+ mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tndem_eip_ecn_set(payload, ecn);
+ mlxsw_reg_tndem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tndem_trap_id_set(payload, trap_id);
+}
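Editor's note: the decapsulation side is the mirror image, except that
RFC 6040 declares some outer/inner combinations invalid -- those are the ones
the trap_en/trap_id pair exists for, and the DECAP_ECN0 trap registered later
in this patch catches them. A sketch of programming one entry, assuming
<net/inet_ecn.h>'s __INET_ECN_decapsulate() (illustrative, not this patch's
code):

#include <net/inet_ecn.h>

static int example_nve_ecn_decap_entry(struct mlxsw_core *mlxsw_core,
                                       u8 outer_ecn, u8 inner_ecn)
{
        char tndem_pl[MLXSW_REG_TNDEM_LEN];
        bool trap_en, set_ce = false;
        u8 new_inner_ecn;

        /* Non-zero means the combination is invalid per RFC 6040;
         * trap it to the CPU rather than rewriting it silently.
         */
        trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
        new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;

        mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en,
                             trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(tndem), tndem_pl);
}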
+
+/* TNPC - Tunnel Port Configuration Register
+ * -----------------------------------------
+ * The TNPC register is used for tunnel port configuration.
+ * Reserved when Spectrum.
+ */
+#define MLXSW_REG_TNPC_ID 0xA020
+#define MLXSW_REG_TNPC_LEN 0x18
+
+MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN);
+
+enum mlxsw_reg_tnpc_tunnel_port {
+ MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+ MLXSW_REG_TNPC_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_tnpc_tunnel_port
+ * Tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4);
+
+/* reg_tnpc_learn_enable_v6
+ * During IPv6 underlay decapsulation, whether to learn from the tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1);
+
+/* reg_tnpc_learn_enable_v4
+ * During IPv4 underlay decapsulation, whether to learn from the tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1);
+
+static inline void mlxsw_reg_tnpc_pack(char *payload,
+ enum mlxsw_reg_tnpc_tunnel_port tport,
+ bool learn_enable)
+{
+ MLXSW_REG_ZERO(tnpc, payload);
+ mlxsw_reg_tnpc_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable);
+ mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable);
+}
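Editor's note: since TNPC is reserved on Spectrum (per the comment above),
this register only matters on later ASICs. The pack helper applies one
learn_enable value to both address families, so toggling learning from the
NVE tunnel port reduces to a one-liner (hypothetical wrapper name):

static int example_tnpc_learning_set(struct mlxsw_core *mlxsw_core,
                                     bool learn_enable)
{
        char tnpc_pl[MLXSW_REG_TNPC_LEN];

        /* Enable/disable FDB learning for IPv4 and IPv6 underlays alike. */
        mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
                            learn_enable);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(tnpc), tnpc_pl);
}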
+
/* TIGCR - Tunneling IPinIP General Configuration Register
* -------------------------------------------------------
* The TIGCR register is used for setting up the IPinIP Tunnel configuration.
@@ -8828,6 +9399,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcc),
MLXSW_REG(mcda),
MLXSW_REG(mgpc),
+ MLXSW_REG(mprs),
+ MLXSW_REG(tngcr),
+ MLXSW_REG(tnumt),
+ MLXSW_REG(tnqcr),
+ MLXSW_REG(tnqdr),
+ MLXSW_REG(tneem),
+ MLXSW_REG(tndem),
+ MLXSW_REG(tnpc),
MLXSW_REG(tigcr),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 79a31de7c825..99b341539870 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -46,6 +46,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_RIFS,
MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
MLXSW_RES_ID_MAX_LPM_TREES,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6,
/* Internal resources.
* Determined by the SW, not queried from the HW.
@@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
[MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03,
};
struct mlxsw_res {
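Editor's note: the two new resource IDs let the driver size NVE multicast
records at init time instead of hard-coding a limit. A hedged sketch of
querying them through the usual core helpers (MLXSW_CORE_RES_VALID /
MLXSW_CORE_RES_GET; the function name is illustrative), using the
mlxsw_sp_l3proto enum this patch moves into spectrum.h:

static u64 example_nve_mc_record_max(struct mlxsw_core *mlxsw_core,
                                     enum mlxsw_sp_l3proto proto)
{
        /* Guard with _VALID: firmware predating the resource simply
         * does not report it.
         */
        if (proto == MLXSW_SP_L3_PROTO_IPV4) {
                if (!MLXSW_CORE_RES_VALID(mlxsw_core,
                                          MAX_NVE_MC_ENTRIES_IPV4))
                        return 0;
                return MLXSW_CORE_RES_GET(mlxsw_core,
                                          MAX_NVE_MC_ENTRIES_IPV4);
        }

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_NVE_MC_ENTRIES_IPV6))
                return 0;
        return MLXSW_CORE_RES_GET(mlxsw_core, MAX_NVE_MC_ENTRIES_IPV6);
}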
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2b14fd0dcc42..ed7e4c4e7403 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -21,6 +21,7 @@
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
+#include <linux/random.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -3469,6 +3470,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */
@@ -3482,6 +3484,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+ /* NVE traps */
+ MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3666,8 +3670,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ u32 seed;
int err;
+ get_random_bytes(&seed, sizeof(seed));
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
@@ -3676,7 +3682,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_DIP |
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
- MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
+ MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3cdb7aca90b7..1f68ac2a20f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -383,6 +383,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
#endif
/* spectrum_router.c */
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
+};
+
+union mlxsw_sp_l3addr {
+ __be32 addr4;
+ struct in6_addr addr6;
+};
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
@@ -416,6 +427,10 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
@@ -423,6 +438,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};
static inline unsigned int
@@ -433,6 +449,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */
default:
return 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 68c8b148bef2..8d14770766b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
};
#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 1a60391daafa..3dbafdeaab2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -7,17 +7,6 @@
#include "spectrum.h"
#include "reg.h"
-enum mlxsw_sp_l3proto {
- MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_SP_L3_PROTO_IPV6,
-#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
-};
-
-union mlxsw_sp_l3addr {
- __be32 addr4;
- struct in6_addr addr6;
-};
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry;
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -44,9 +31,7 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index db715da7bab7..fa16ad2c6a50 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -17,7 +17,6 @@
#include <net/switchdev.h>
#include "spectrum_span.h"
-#include "spectrum_router.h"
#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
@@ -2289,7 +2288,7 @@ struct mlxsw_sp_switchdev_event_work {
unsigned long event;
};
-static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
+static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
struct mlxsw_sp_switchdev_event_work *switchdev_work =
container_of(work, struct mlxsw_sp_switchdev_event_work, work);
@@ -2344,16 +2343,23 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct mlxsw_sp_switchdev_event_work *switchdev_work;
- struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct net_device *br_dev;
- if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
+ /* Tunnel devices are not our uppers, so check their master instead */
+ br_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (!br_dev)
+ return NOTIFY_DONE;
+ if (!netif_is_bridge_master(br_dev))
+ return NOTIFY_DONE;
+ if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
return NOTIFY_DONE;
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return NOTIFY_BAD;
- INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
switchdev_work->dev = dev;
switchdev_work->event = event;
@@ -2362,6 +2368,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ INIT_WORK(&switchdev_work->work,
+ mlxsw_sp_switchdev_bridge_fdb_event_work);
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 53020724c2f6..6f18f4d3322a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -24,6 +24,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
+ MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
@@ -59,6 +60,7 @@ enum {
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+ MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 8f11fdba8d0e..9b8a17ee3cb3 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1620,7 +1620,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->switchdev_ops = &ocelot_port_switchdev_ops;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 0cf0b0935b3b..4c23d18bbf44 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -128,11 +128,16 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
len += sz;
} while (len < buf_len);
- /* Read the FCS and discard it */
+ /* Read the FCS */
sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
/* Update the statistics if part of the FCS was read before */
len -= ETH_FCS_LEN - sz;
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
if (sz < 0) {
err = sz;
break;
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 5b06f07c78cd..3c661f422688 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 305ac07dc1e7..c0830c0c2c3f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 934a70835473..f907b7d98917 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -1,36 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index fdcd2bc98916..9b6cfa697879 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bpf.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 813644e90b27..721921bcf120 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_BPF_FW_H
#define NFP_BPF_FW_H 1
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 6ed1b5207ecd..97d33bb4d84d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#define pr_fmt(fmt) "NFP net bpf: " fmt
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index d9d37aa860e0..6243af0ab025 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <net/pkt_cls.h>
@@ -509,11 +479,6 @@ err_free_bpf:
return err;
}
-static void nfp_check_rhashtable_empty(void *ptr, void *arg)
-{
- WARN_ON_ONCE(1);
-}
-
static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 25e10cfa2678..52457ae3b259 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 49c7bead8113..927e038d9f77 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
/*
* nfp_net_offload.c
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index cddb70786a58..193dd685b365 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 46ba0cf257c6..f2b1938236fe 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/geneve.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index cb8565222621..4c5eaf36d5bb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 325954b829c8..29d673aa5277 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_FLOWER_CMSG_H
#define NFP_FLOWER_CMSG_H
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index bf10598f66ae..81dcf5b318ba 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include "main.h"
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index e57d23746585..3a54728d2ea6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
@@ -518,8 +488,8 @@ err_clear_nn:
static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
+ u64 version, features, ctx_count;
struct nfp_flower_priv *app_priv;
- u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -543,6 +513,16 @@ static int nfp_flower_init(struct nfp_app *app)
return err;
}
+ ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
+ &err);
+ if (err) {
+ nfp_warn(app->cpp,
+ "FlowerNIC: unsupported host context count: %d\n",
+ err);
+ err = 0;
+ ctx_count = BIT(17);
+ }
+
/* We need to ensure hardware has enough flower capabilities. */
if (version != NFP_FLOWER_ALLOWED_VER) {
nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
@@ -553,6 +533,7 @@ static int nfp_flower_init(struct nfp_app *app)
if (!app_priv)
return -ENOMEM;
+ app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
app->priv = app_priv;
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs_high);
@@ -563,7 +544,7 @@ static int nfp_flower_init(struct nfp_app *app)
init_waitqueue_head(&app_priv->mtu_conf.wait_q);
spin_lock_init(&app_priv->mtu_conf.lock);
- err = nfp_flower_metadata_init(app);
+ err = nfp_flower_metadata_init(app, ctx_count);
if (err)
goto err_free_app_priv;
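Editor's note: the hunk above sizes the stats machinery from the
firmware-advertised host context count, falling back to BIT(17) when the
CONFIG_FC_HOST_CTX_COUNT symbol is absent, then rounds up to a power of two
so the circular-buffer arithmetic in metadata.c stays cheap. A distillation
of that calculation, for reference only:

/* Illustrative: how stats_ring_size is derived from the rtsym value. */
static u32 example_stats_ring_size(u64 ctx_count, int rtsym_err)
{
        if (rtsym_err)
                ctx_count = BIT(17);    /* legacy firmware default */

        return roundup_pow_of_two(ctx_count);
}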
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 81d941ab895c..90045bab95bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
@@ -38,6 +8,7 @@
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
+#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
@@ -50,10 +21,8 @@ struct net_device;
struct nfp_app;
#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
-#define NFP_FL_STATS_ENTRY_RS BIT(20)
-#define NFP_FL_STATS_ELEM_RS 4
-#define NFP_FL_REPEATED_HASH_MAX BIT(17)
-#define NFP_FLOWER_HASH_BITS 19
+#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
+ init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
@@ -138,7 +107,10 @@ struct nfp_fl_lag {
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
+ * @stats_ring_size: Maximum number of allowed stats ids
* @flow_table: Hash table used to store flower rules
+ * @stats: Stored stats updates for flower rules
+ * @stats_lock: Lock for flower rule stats updates
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
@@ -171,7 +143,10 @@ struct nfp_flower_priv {
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
- DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
+ u32 stats_ring_size;
+ struct rhashtable flow_table;
+ struct nfp_fl_stats *stats;
+ spinlock_t stats_lock; /* lock stats */
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
@@ -227,10 +202,8 @@ struct nfp_fl_stats {
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
- struct hlist_node link;
+ struct rhash_head fl_node;
struct rcu_head rcu;
- spinlock_t lock; /* lock stats */
- struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr;
struct net_device *ingress_dev;
char *unmasked_data;
@@ -239,6 +212,8 @@ struct nfp_fl_payload {
bool ingress_offload;
};
+extern const struct rhashtable_params nfp_flower_table_params;
+
struct nfp_fl_stats_frame {
__be32 stats_con_id;
__be32 pkt_count;
@@ -246,7 +221,7 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
-int nfp_flower_metadata_init(struct nfp_app *app);
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 17acb8cc6044..e54fb6034326 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index c098730544b7..48729bf171e0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/hash.h>
#include <linux/hashtable.h>
@@ -48,6 +18,12 @@ struct nfp_mask_id_table {
u8 mask_id;
};
+struct nfp_fl_flow_table_cmp_arg {
+ struct net_device *netdev;
+ unsigned long cookie;
+ __be32 host_ctx;
+};
+
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
@@ -55,14 +31,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
ring = &priv->stats_ids.free_list;
/* Check if buffer is full. */
- if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
- NFP_FL_STATS_ELEM_RS -
+ if (!CIRC_SPACE(ring->head, ring->tail,
+ priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
NFP_FL_STATS_ELEM_RS + 1))
return -ENOBUFS;
memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
- (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
return 0;
}
@@ -74,7 +50,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
struct circ_buf *ring;
ring = &priv->stats_ids.free_list;
- freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+ freed_stats_id = priv->stats_ring_size;
/* Check for unallocated entries first. */
if (priv->stats_ids.init_unalloc > 0) {
*stats_context_id = priv->stats_ids.init_unalloc - 1;
@@ -92,7 +68,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
*stats_context_id = temp_stats_id;
memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
- (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
return 0;
}
@@ -102,56 +78,37 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx)
{
+ struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
- struct nfp_fl_payload *flower_entry;
- hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
- tc_flower_cookie)
- if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
- (!netdev || flower_entry->ingress_dev == netdev) &&
- (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
- flower_entry->meta.host_ctx_id == host_ctx))
- return flower_entry;
+ flower_cmp_arg.netdev = netdev;
+ flower_cmp_arg.cookie = tc_flower_cookie;
+ flower_cmp_arg.host_ctx = host_ctx;
- return NULL;
-}
-
-static void
-nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
-{
- struct nfp_fl_payload *nfp_flow;
- unsigned long flower_cookie;
-
- flower_cookie = be64_to_cpu(stats->stats_cookie);
-
- rcu_read_lock();
- nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
- stats->stats_con_id);
- if (!nfp_flow)
- goto exit_rcu_unlock;
-
- spin_lock(&nfp_flow->lock);
- nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
- nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
- nfp_flow->stats.used = jiffies;
- spin_unlock(&nfp_flow->lock);
-
-exit_rcu_unlock:
- rcu_read_unlock();
+ return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
+ nfp_flower_table_params);
}
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
- struct nfp_fl_stats_frame *stats_frame;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_stats_frame *stats;
unsigned char *msg;
+ u32 ctx_id;
int i;
msg = nfp_flower_cmsg_get_data(skb);
- stats_frame = (struct nfp_fl_stats_frame *)msg;
- for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
- nfp_flower_update_stats(app, stats_frame + i);
+ spin_lock(&priv->stats_lock);
+ for (i = 0; i < msg_len / sizeof(*stats); i++) {
+ stats = (struct nfp_fl_stats_frame *)msg + i;
+ ctx_id = be32_to_cpu(stats->stats_con_id);
+ priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
+ priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
+ priv->stats[ctx_id].used = jiffies;
+ }
+ spin_unlock(&priv->stats_lock);
}
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
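
(A sketch of the new stats path in the hunk above, assuming a glibc-style <endian.h>: be32toh()/be64toh() stand in for be32_to_cpu()/be64_to_cpu(), a pthread mutex stands in for priv->stats_lock, and NUM_CTX for priv->stats_ring_size. The firmware message is an array of big-endian frames, each accumulated into the flat stats slot named by its context ID, rather than looked up per flow.)

#include <endian.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_CTX 16	/* stand-in for priv->stats_ring_size */

struct stats_frame {		/* wire format: every field big-endian */
	uint64_t stats_cookie;
	uint32_t stats_con_id;
	uint32_t pkt_count;
	uint64_t byte_count;
};

struct fl_stats { uint64_t bytes; uint32_t pkts; };

static struct fl_stats stats[NUM_CTX];
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static void rx_flow_stats(const void *msg, size_t msg_len)
{
	const struct stats_frame *frame = msg;
	size_t i;

	pthread_mutex_lock(&stats_lock);	/* priv->stats_lock analogue */
	for (i = 0; i < msg_len / sizeof(*frame); i++) {
		uint32_t ctx = be32toh(frame[i].stats_con_id);

		if (ctx >= NUM_CTX)	/* the driver trusts firmware IDs */
			continue;
		stats[ctx].pkts  += be32toh(frame[i].pkt_count);
		stats[ctx].bytes += be64toh(frame[i].byte_count);
		/* the driver also timestamps: stats[ctx].used = jiffies */
	}
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	struct stats_frame f = {
		.stats_con_id = htobe32(3),
		.pkt_count    = htobe32(10),
		.byte_count   = htobe64(1500),
	};

	rx_flow_stats(&f, sizeof(f));
	printf("ctx 3: %u pkts, %llu bytes\n", stats[3].pkts,
	       (unsigned long long)stats[3].bytes);
	return 0;
}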
@@ -345,9 +302,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
- nfp_flow->stats.pkts = 0;
- nfp_flow->stats.bytes = 0;
- nfp_flow->stats.used = jiffies;
+ priv->stats[stats_cxt].pkts = 0;
+ priv->stats[stats_cxt].bytes = 0;
+ priv->stats[stats_cxt].used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
NFP_FL_STATS_CTX_DONT_CARE);
@@ -389,12 +346,56 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
return nfp_release_stats_entry(app, temp_ctx_id);
}
-int nfp_flower_metadata_init(struct nfp_app *app)
+static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
+ const struct nfp_fl_payload *flow_entry = obj;
+
+ if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
+ (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
+ flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+ return flow_entry->tc_flower_cookie != cmp_arg->cookie;
+
+ return 1;
+}
+
+static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfp_fl_payload *flower_entry = data;
+
+ return jhash2((u32 *)&flower_entry->tc_flower_cookie,
+ sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
+ seed);
+}
+
+static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;
+
+ return jhash2((u32 *)&cmp_arg->cookie,
+ sizeof(cmp_arg->cookie) / sizeof(u32), seed);
+}
+
+const struct rhashtable_params nfp_flower_table_params = {
+ .head_offset = offsetof(struct nfp_fl_payload, fl_node),
+ .hashfn = nfp_fl_key_hashfn,
+ .obj_cmpfn = nfp_fl_obj_cmpfn,
+ .obj_hashfn = nfp_fl_obj_hashfn,
+ .automatic_shrinking = true,
+};
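
(A small sketch of the contract behind the rhashtable_params above, under the assumption that the 64-bit cookie is the only hashed field: the key hash over the lookup argument and the object hash over the stored payload must cover exactly the same bytes so a lookup lands in the right bucket, while obj_cmpfn applies the extra filters and returns 0 only on a match, note the inverted convention where nonzero means "no match". jhash2() is kernel-only, so a toy stand-in hash is used, and the host-context filter is elided.)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for jhash2() over the 64-bit cookie. */
static uint32_t hash_cookie(uint64_t cookie, uint32_t seed)
{
	uint32_t w[2] = { (uint32_t)cookie, (uint32_t)(cookie >> 32) };

	return (w[0] * 2654435761u) ^ (w[1] * 2246822519u) ^ seed;
}

struct cmp_arg {		/* lookup key */
	const void *netdev;	/* NULL means "don't care" */
	uint64_t cookie;
};

struct payload {		/* stored object */
	uint64_t cookie;
	const void *ingress_dev;
};

static uint32_t key_hashfn(const struct cmp_arg *k, uint32_t seed)
{
	return hash_cookie(k->cookie, seed);
}

static uint32_t obj_hashfn(const struct payload *p, uint32_t seed)
{
	return hash_cookie(p->cookie, seed);
}

/* 0 == match, mirroring rhashtable's obj_cmpfn convention. */
static int obj_cmpfn(const struct cmp_arg *k, const struct payload *p)
{
	if (!k->netdev || p->ingress_dev == k->netdev)
		return p->cookie != k->cookie;
	return 1;
}

int main(void)
{
	struct payload p = { .cookie = 0xdeadbeef, .ingress_dev = &p };
	struct cmp_arg k = { .netdev = NULL, .cookie = 0xdeadbeef };

	/* Same bytes hashed on both sides, so same bucket. */
	assert(key_hashfn(&k, 42) == obj_hashfn(&p, 42));
	assert(obj_cmpfn(&k, &p) == 0);
	printf("key and object agree\n");
	return 0;
}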
+
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
{
struct nfp_flower_priv *priv = app->priv;
+ int err;
hash_init(priv->mask_table);
- hash_init(priv->flow_table);
+
+ err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
+ if (err)
+ return err;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -402,7 +403,7 @@ int nfp_flower_metadata_init(struct nfp_app *app)
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- return -ENOMEM;
+ goto err_free_flow_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -416,18 +417,29 @@ int nfp_flower_metadata_init(struct nfp_app *app)
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
- NFP_FL_STATS_ENTRY_RS));
+ priv->stats_ring_size));
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
- priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+ priv->stats_ids.init_unalloc = host_ctx_count;
+
+ priv->stats = kvmalloc_array(priv->stats_ring_size,
+ sizeof(struct nfp_fl_stats), GFP_KERNEL);
+ if (!priv->stats)
+ goto err_free_ring_buf;
+
+ spin_lock_init(&priv->stats_lock);
return 0;
+err_free_ring_buf:
+ vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_flow_table:
+ rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
}
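
(The reworked init acquires one more resource, so its unwind ladder gains one more label. A compact sketch of the idiom, with hypothetical malloc()/free() pairs standing in for the rhashtable, mask-ID, and stats allocations above; each label frees only what was acquired before the jump, in reverse order of acquisition.)

#include <stdlib.h>

struct priv { void *table, *mask_buf, *stats_buf, *stats; };

static int metadata_init(struct priv *p)
{
	p->table = malloc(64);		/* rhashtable_init() analogue */
	if (!p->table)
		return -1;
	p->mask_buf = malloc(64);
	if (!p->mask_buf)
		goto err_free_table;
	p->stats_buf = malloc(64);
	if (!p->stats_buf)
		goto err_free_mask_buf;
	p->stats = malloc(64);		/* the new kvmalloc_array() step */
	if (!p->stats)
		goto err_free_stats_buf;
	return 0;

err_free_stats_buf:			/* unwind in reverse order */
	free(p->stats_buf);
err_free_mask_buf:
	free(p->mask_buf);
err_free_table:
	free(p->table);			/* rhashtable_destroy() analogue */
	return -1;			/* -ENOMEM in the driver */
}

int main(void)
{
	struct priv p;

	return metadata_init(&p) ? 1 : 0;
}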
@@ -438,6 +450,9 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
if (!priv)
return;
+ rhashtable_free_and_destroy(&priv->flow_table,
+ nfp_check_rhashtable_empty, NULL);
+ kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
vfree(priv->stats_ids.free_list.buf);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index bd19624f10cf..29c95423ab64 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/skbuff.h>
#include <net/devlink.h>
@@ -428,8 +398,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
- spin_lock_init(&flow_pay->lock);
-
flow_pay->ingress_offload = !egress;
return flow_pay;
@@ -513,9 +481,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- INIT_HLIST_NODE(&flow_pay->link);
flow_pay->tc_flower_cookie = flow->cookie;
- hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
+ err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto err_destroy_flow;
+
port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
@@ -550,6 +521,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
int err;
@@ -573,11 +545,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_flow;
err_free_flow:
- hash_del_rcu(&nfp_flow->link);
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &nfp_flow->fl_node,
+ nfp_flower_table_params));
kfree_rcu(nfp_flow, rcu);
return err;
}
@@ -598,8 +572,10 @@ static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
+ u32 ctx_id;
ingr_dev = egress ? NULL : netdev;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
@@ -610,13 +586,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->ingress_offload && egress)
return 0;
- spin_lock_bh(&nfp_flow->lock);
- tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
- nfp_flow->stats.pkts, nfp_flow->stats.used);
+ ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+ spin_lock_bh(&priv->stats_lock);
+ tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ priv->stats[ctx_id].used);
- nfp_flow->stats.pkts = 0;
- nfp_flow->stats.bytes = 0;
- spin_unlock_bh(&nfp_flow->lock);
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+ spin_unlock_bh(&priv->stats_lock);
return 0;
}
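
(A short sketch of the read-and-reset semantics nfp_flower_get_stats() now has, assuming the same flat stats array and lock as in the metadata.c sketch: each call reports the counters accumulated since the previous call and then zeroes them, so tcf_exts_stats_update() always receives a delta rather than a running total.)

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fl_stats { uint64_t bytes; uint32_t pkts; };

static struct fl_stats stats[16];
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

/* Report the per-context counters and zero them in one critical
 * section, mirroring the spin_lock_bh() region in the driver. */
static void get_stats(uint32_t ctx, struct fl_stats *out)
{
	pthread_mutex_lock(&stats_lock);
	*out = stats[ctx];
	stats[ctx].pkts = 0;
	stats[ctx].bytes = 0;
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	struct fl_stats out;

	stats[3].pkts = 10;
	stats[3].bytes = 1500;
	get_stats(3, &out);	/* first read: the delta so far */
	printf("%u pkts, %llu bytes\n", out.pkts,
	       (unsigned long long)out.bytes);
	get_stats(3, &out);	/* second read: zero, nothing new */
	printf("%u pkts, %llu bytes\n", out.pkts,
	       (unsigned long long)out.bytes);
	return 0;
}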
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 382bb93cb090..30c926c4bc47 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
index 8b56c27931bf..dd359a44adfb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
@@ -1,36 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __NFP_ABI__
#define __NFP_ABI__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 8607d09ab732..68a0991aac22 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -60,6 +30,11 @@ static const struct nfp_app_type *apps[] = {
#endif
};
+void nfp_check_rhashtable_empty(void *ptr, void *arg)
+{
+ WARN_ON_ONCE(1);
+}
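
(A sketch of why an always-warning callback is useful here, with a hypothetical destroy helper standing in for rhashtable_free_and_destroy(): by teardown every flow should already have been removed via nfp_flower_del_offload(), so the free callback firing at all indicates a leaked entry.)

#include <stdio.h>

/* WARN_ON_ONCE(1) analogue: any invocation means a leak. */
static void check_empty(void *entry, void *arg)
{
	(void)arg;
	fprintf(stderr, "WARN: leaked flow entry %p\n", entry);
}

/* Stand-in for rhashtable_free_and_destroy(): the callback only
 * runs for entries still present at destroy time. */
static void free_and_destroy(void **entries, int n,
			     void (*free_fn)(void *, void *), void *arg)
{
	for (int i = 0; i < n; i++)
		free_fn(entries[i], arg);
}

int main(void)
{
	void *leftovers[1] = { (void *)0x1 };

	free_and_destroy(leftovers, 0, check_empty, NULL);	/* clean: silent */
	free_and_destroy(leftovers, 1, check_empty, NULL);	/* leak: warns */
	return 0;
}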
+
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev)
{
if (nfp_netdev_is_nfp_net(netdev)) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index c896eb8f87a1..4d6ecf99b1cc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _NFP_APP_H
#define _NFP_APP_H 1
@@ -196,6 +166,7 @@ struct nfp_app {
void *priv;
};
+void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
index e2dfe4f168bb..f119277fd66c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index cc6ace2be8a9..b04b83687fe2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#include <linux/bitops.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 5b257c603e91..648c2810e5ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 4213fe42ac4d..808647ec3573 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/rtnetlink.h>
#include <net/devlink.h>
@@ -96,6 +66,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
+ unsigned int lanes;
int ret;
if (count < 2)
@@ -114,8 +85,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
goto out;
}
- ret = nfp_devlink_set_lanes(pf, eth_port.index,
- eth_port.port_lanes / count);
+ /* Special case the 100G CXP -> 2x40G split */
+ lanes = eth_port.port_lanes / count;
+ if (eth_port.lanes == 10 && count == 2)
+ lanes = 8 / count;
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
@@ -128,6 +103,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
+ unsigned int lanes;
int ret;
mutex_lock(&pf->lock);
@@ -143,7 +119,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
goto out;
}
- ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
+ /* Special case the 100G CXP -> 2x40G unsplit */
+ lanes = eth_port.port_lanes;
+ if (eth_port.port_lanes == 8)
+ lanes = 10;
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
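
(A worked sketch of the lane arithmetic in the two hunks above, using only the values visible in the diff: an ordinary port splits into port_lanes / count lanes each, but a 100G CXP port reports 10 lanes, which does not divide evenly into two 40G ports, so a 2-way split is special-cased to 8 / 2 = 4 lanes per port, and an 8-lane port unsplits back to the full 10. The driver distinguishes eth_port.lanes from eth_port.port_lanes; the sketch collapses them into a single parameter.)

#include <stdio.h>

/* Lanes per resulting port when splitting `count` ways. */
static unsigned int split_lanes(unsigned int port_lanes, unsigned int count)
{
	/* Special case the 100G CXP -> 2x40G split. */
	if (port_lanes == 10 && count == 2)
		return 8 / count;
	return port_lanes / count;
}

/* Lanes restored when unsplitting back to a single port. */
static unsigned int unsplit_lanes(unsigned int port_lanes)
{
	/* Special case the 2x40G -> 100G CXP unsplit. */
	if (port_lanes == 8)
		return 10;
	return port_lanes;
}

int main(void)
{
	printf("10-lane CXP split 2 ways: %u lanes each\n",
	       split_lanes(10, 2));	/* 4, i.e. 2x40G */
	printf("8-lane port unsplit: %u lanes\n",
	       unsplit_lanes(8));	/* back to 10 */
	return 0;
}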
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index f0dcf45aeec1..5cabb1aa9c0c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 9474a4eed8ce..6c10e8d119e4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_main.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 595b3dc280e3..a3613a2e0aa5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_main.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 439e6ffe2f05..6f0c37d09256 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 7b91e77b2016..6bddfcfdec34 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_common.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index 2190836eaa1d..f2aaef976c7d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/device.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 863ca04fffbf..d7c8518ac952 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_ctrl.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index b6b897840ac5..769ceef09756 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 099b63d67451..69b1c9b62e3d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6a79c8e4a7a4..cb9c512abc76 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_ethtool.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 0b1ac9c234d1..1e7d20468a34 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_main.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 18a09cdcd9c6..c09b893c30dd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 1bf2b18109ab..c412b94bfb97 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_NET_REPR_H
#define NFP_NET_REPR_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 8b1b962cf1d1..b6ec46ed0540 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index e9df9d1eab8e..c9f09c5bb5ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 68928c86b698..d2c1e9ea5668 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_netvf_main.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 9c1298114c70..86bc149ca231 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/lockdep.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 51f10ae2d53e..b2479a2a49e5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _NFP_PORT_H_
#define _NFP_PORT_H_
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
index 0ecd83705368..814360ed3a20 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
index 6cee6382deb4..afab6f0fc564 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
#ifndef NFP_CRC32_H
#define NFP_CRC32_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index f44d0a857314..db94b0bddc92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
index 0e497a6154db..4a12133850f5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
#ifndef NFP6000_NFP6000_H
#define NFP6000_NFP6000_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
index 40fb19939505..9a86ec11c5ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_xpb.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index fd63d83bdea5..85d46f206b3c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp6000_pcie.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
index 245d8aaaa97d..6d1bffa6eac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp6000_pcie.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
index 31fe92247f51..3d172e255693 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_arm.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 123e29cba6d1..2dd0f5842873 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cpp.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index f7e1d79e735f..94994a939277 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cppcore.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 03fcde5fa137..3cfecf105bde 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cpplib.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 063a9a6243d6..f05dd34ab89f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
* after chip reset.
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
index 5f193fe2d69e..79e17943519e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_mip.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index c88bf673cb76..7bc17b94ac60 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index a164fbc85cd3..d4e02542e2e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nffw.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
index 8d2cbdf4d517..49a4d3f56b56 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nffw.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index bf593a6b26a1..ce1577bbbd2a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nsp.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index bd6c9071c8e9..ff33ac54097a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#ifndef NSP_NSP_H
#define NSP_NSP_H 1
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
index 5d362f87af08..0997d127144f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 7ca589660e4d..802c9224bb32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/* Authors: David Brunecz <david.brunecz@netronome.com>
* Jakub Kicinski <jakub.kicinski@netronome.com>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index d32af598da90..ce7492a6a98f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_resource.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 1ad0a015572e..75f012444796 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_rtsym.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
index f691c6587c76..79470f198a62 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_target.c
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
index d5b587fccaa3..aea8579206ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.c
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index bf431ab86864..f2dfc7a976c0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12276,7 +12276,7 @@ struct public_func {
#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
u32 mac_upper;
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
@@ -12698,6 +12698,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
@@ -12750,6 +12751,7 @@ struct public_drv_mb {
/* get MFW feature support response */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
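The two VLINK bits added above pair up as a driver/firmware handshake: the driver sets DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK in its DRV_MSG_CODE_FEATURE_SUPPORT request, and the management firmware answers with FW_MB_PARAM_FEATURE_SUPPORT_VLINK, which the driver caches and later tests before trusting per-function virtual-link status. A standalone sketch of that flow; the struct and helpers here are illustrative stand-ins, not the real qed API, and the firmware reply is assumed to echo every advertised bit:

#include <stdio.h>

#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE   0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK       0x00010000

struct mcp_info {
        unsigned int capabilities;      /* bits acknowledged by firmware */
};

/* Advertise what the driver supports; in the real driver the reply
 * comes back through the mailbox and lands in mcp_info->capabilities.
 */
static void set_capabilities(struct mcp_info *mcp)
{
        unsigned int features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
                                DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;

        mcp->capabilities = features;   /* assume firmware echoes all */
}

static int vlink_supported(const struct mcp_info *mcp)
{
        return !!(mcp->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_VLINK);
}

int main(void)
{
        struct mcp_info mcp = { 0 };

        set_capabilities(&mcp);
        printf("virtual link negotiated: %d\n", vlink_supported(&mcp));
        return 0;
}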
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 58c7eb9d8e1b..b06e4cb72c6c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1247,6 +1247,52 @@ static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data, int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr;
+ u32 i, size;
+
+ func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MIN_BW);
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MAX_BW);
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_reset)
{
@@ -1274,10 +1320,29 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
goto out;
}
- if (p_hwfn->b_drv_link_init)
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
- else
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Virtual link_up = %d\n", p_link->link_up);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Physical link_up = %d\n", p_link->link_up);
+ }
+ } else {
p_link->link_up = false;
+ }
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1504,53 +1569,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
-static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
- struct public_func *p_shmem_info)
-{
- struct qed_mcp_function_info *p_info;
-
- p_info = &p_hwfn->mcp_info->func_info;
-
- p_info->bandwidth_min = (p_shmem_info->config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- p_info->bandwidth_min);
- p_info->bandwidth_min = 1;
- }
-
- p_info->bandwidth_max = (p_shmem_info->config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- p_info->bandwidth_max);
- p_info->bandwidth_max = 100;
- }
-}
-
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data, int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
- return size;
-}
-
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
@@ -3351,7 +3369,8 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp, mcp_param, features;
- features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
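Besides hoisting the two helpers above their new caller, the move rewrites qed_read_pf_bandwidth() from open-coded (config & MASK) >> SHIFT arithmetic to the QED_MFW_GET_FIELD() accessor. The usual shape of such a macro, reconstructed here as a sketch since its definition sits outside this diff (the mask/shift values below are illustrative):

#include <stdio.h>
#include <stdint.h>

#define FUNC_MF_CFG_MIN_BW_MASK         0x0000ff00
#define FUNC_MF_CFG_MIN_BW_SHIFT        8

/* Token pasting pairs each FOO_MASK with its FOO_SHIFT, so callers
 * name the field once instead of repeating mask and shift by hand.
 */
#define MFW_GET_FIELD(val, name) \
        (((val) & name##_MASK) >> name##_SHIFT)

int main(void)
{
        uint32_t config = 0x00006400;   /* min-BW field holds 0x64 = 100 */

        printf("min bw: %u\n",
               (unsigned int)MFW_GET_FIELD(config, FUNC_MF_CFG_MIN_BW));
        return 0;
}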
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7d3f671e1bb3..8c4f49adcf9e 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4269,8 +4269,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
@@ -4553,27 +4553,19 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
{
- static const struct rtl_cfg2_info {
- u32 mac_version;
- u32 clk;
- u32 val;
- } cfg2_info [] = {
- { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
- { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
- { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
- { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
- };
- const struct rtl_cfg2_info *p = cfg2_info;
- unsigned int i;
- u32 clk;
+ u32 val;
- clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz;
- for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
- if ((p->mac_version == mac_version) && (p->clk == clk)) {
- RTL_W32(tp, 0x7c, p->val);
- break;
- }
- }
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ val = 0x000fff00;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
+ val = 0x00ffff00;
+ else
+ return;
+
+ if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
+ val |= 0xff;
+
+ RTL_W32(tp, 0x7c, val);
}
static void rtl_set_rx_mode(struct net_device *dev)
@@ -6833,7 +6825,6 @@ static void rtl8169_net_suspend(struct net_device *dev)
phy_stop(dev->phydev);
netif_device_detach(dev);
- netif_stop_queue(dev);
rtl_lock_work(tp);
napi_disable(&tp->napi);
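The rtl8169_set_magic_reg() rewrite above drops the four-entry cfg2_info table by observing that the 33 MHz value is the per-MAC-version base and the 66 MHz variant differs only in the low byte, so ORing in 0xff when Config2 reports a 66 MHz PCI clock reproduces every table entry. A standalone check of that equivalence (userspace, not kernel code):

#include <assert.h>
#include <stdint.h>

static uint32_t magic(int is_ver_06, int clk_66mhz)
{
        uint32_t val = is_ver_06 ? 0x00ffff00 : 0x000fff00;

        if (clk_66mhz)
                val |= 0xff;    /* only the low byte differs at 66 MHz */
        return val;
}

int main(void)
{
        assert(magic(0, 0) == 0x000fff00);      /* VER_05, 33 MHz */
        assert(magic(0, 1) == 0x000fffff);      /* VER_05, 66 MHz */
        assert(magic(1, 0) == 0x00ffff00);      /* VER_06, 33 MHz */
        assert(magic(1, 1) == 0x00ffffff);      /* VER_06, 66 MHz */
        return 0;
}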
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 7aa5ebb6766c..4289ccb26e4e 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
- if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+ if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
+ /* reading the register clears the irq */
+ netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
break;
+ }
/* This barrier is needed to keep us from reading
* any other fields out of the netsec_de until we have
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 16dcbf36f8cc..226be2a56c1f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -570,16 +570,14 @@ static inline int cpsw_get_slave_port(u32 slave_num)
return slave_num + 1;
}
-static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
{
struct cpsw_common *cpsw = priv->cpsw;
if (cpsw->data.dual_emac) {
struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
- int slave_port = cpsw_get_slave_port(slave->slave_num);
- cpsw_ale_add_mcast(cpsw->ale, addr,
- 1 << slave_port | ALE_PORT_HOST,
+ cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
ALE_VLAN, slave->port_vlan, 0);
return;
}
@@ -662,16 +660,35 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ cpsw_add_mcast(priv, addr);
+ return 0;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- int vid;
+ int vid, flags;
- if (cpsw->data.dual_emac)
+ if (cpsw->data.dual_emac) {
vid = cpsw->slaves[priv->emac_port].port_vlan;
- else
- vid = cpsw->data.default_vlan;
+ flags = ALE_VLAN;
+ } else {
+ vid = 0;
+ flags = 0;
+ }
+
+ cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
@@ -684,19 +701,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
}
/* Restore allmulti on vlans if necessary */
- cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);
-
- /* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);
+ cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
- if (!netdev_mc_empty(ndev)) {
- struct netdev_hw_addr *ha;
-
- /* program multicast address list into ALE register */
- netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(priv, ha->addr);
- }
- }
+ __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
}
static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -1410,7 +1417,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
port_mask, port_mask, 0);
cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
- port_mask, ALE_VLAN, slave->port_vlan, 0);
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN |
ALE_SECURE, slave->port_vlan);
@@ -1956,6 +1963,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
+ __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
@@ -2293,16 +2301,19 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
{
int ret;
int unreg_mcast_mask = 0;
+ int mcast_mask;
u32 port_mask;
struct cpsw_common *cpsw = priv->cpsw;
if (cpsw->data.dual_emac) {
port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+ mcast_mask = ALE_PORT_HOST;
if (priv->ndev->flags & IFF_ALLMULTI)
- unreg_mcast_mask = port_mask;
+ unreg_mcast_mask = mcast_mask;
} else {
port_mask = ALE_ALL_PORTS;
+ mcast_mask = port_mask;
if (priv->ndev->flags & IFF_ALLMULTI)
unreg_mcast_mask = ALE_ALL_PORTS;
@@ -2321,7 +2332,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
goto clean_vid;
ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
- port_mask, ALE_VLAN, vid, 0);
+ mcast_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 5766225a4ce1..798c989d5d93 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -136,7 +136,7 @@ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}
-static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
+static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
int i;
@@ -175,7 +175,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, const u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -309,7 +309,7 @@ static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
}
}
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -336,7 +336,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
}
EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -352,7 +352,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
}
EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
-int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -386,7 +386,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
}
EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index d4fe9016429b..cd07a3e96d57 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -105,13 +105,13 @@ void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid);
-int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid);
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
index 3a424c864f4d..d62e8c6205f7 100644
--- a/drivers/net/fddi/Kconfig
+++ b/drivers/net/fddi/Kconfig
@@ -15,6 +15,17 @@ config FDDI
if FDDI
+config DEFZA
+ tristate "DEC FDDIcontroller 700/700-C (DEFZA-xx) support"
+ depends on FDDI && TC
+ help
+ This is support for the DEC FDDIcontroller 700 (DEFZA-AA, fiber)
+ and 700-C (DEFZA-CA, copper) TURBOchannel network cards which
+ can connect you to a local FDDI network.
+
+ To compile this driver as a module, choose M here: the module
+ will be called defza. If unsure, say N.
+
config DEFXX
tristate "Digital DEFTA/DEFEA/DEFPA adapter support"
depends on FDDI && (PCI || EISA || TC)
diff --git a/drivers/net/fddi/Makefile b/drivers/net/fddi/Makefile
index 36da19c9a8aa..194b52cc20b0 100644
--- a/drivers/net/fddi/Makefile
+++ b/drivers/net/fddi/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_DEFXX) += defxx.o
+obj-$(CONFIG_DEFZA) += defza.o
obj-$(CONFIG_SKFP) += skfp/
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
new file mode 100644
index 000000000000..3b7f10a5f06a
--- /dev/null
+++ b/drivers/net/fddi/defza.c
@@ -0,0 +1,1564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * Dave Sawyer & Phil Weeks & Frank Itkowsky,
+ * "DEC FDDIcontroller 700 Port Specification",
+ * Revision 1.1, Digital Equipment Corporation
+ */
+
+/* ------------------------------------------------------------------------- */
+/* FZA configurable parameters. */
+
+/* Transmit ring size selector; 0 selects 512 descriptors, 1 selects 1024. */
+#define FZA_RING_TX_MODE 0
+
+/* The number of receive ring descriptors; from 2 up to 256. */
+#define FZA_RING_RX_SIZE 256
+
+/* End of FZA configurable parameters. No need to change anything below. */
+/* ------------------------------------------------------------------------- */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/tc.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <asm/barrier.h>
+
+#include "defza.h"
+
+#define DRV_NAME "defza"
+#define DRV_VERSION "v.1.1.4"
+#define DRV_RELDATE "Oct 6 2018"
+
+static char version[] =
+ DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
+MODULE_LICENSE("GPL");
+
+static int loopback;
+module_param(loopback, int, 0644);
+
+/* Ring Purger Multicast */
+static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
+/* Directed Beacon Multicast */
+static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };
+
+/* Shorthands for MMIO accesses that we require to be strongly ordered
+ * WRT preceding MMIO accesses.
+ */
+#define readw_o readw_relaxed
+#define readl_o readl_relaxed
+
+#define writew_o writew_relaxed
+#define writel_o writel_relaxed
+
+/* Shorthands for MMIO accesses that we are happy with being weakly ordered
+ * WRT preceding MMIO accesses.
+ */
+#define readw_u readw_relaxed
+#define readl_u readl_relaxed
+#define readq_u readq_relaxed
+
+#define writew_u writew_relaxed
+#define writel_u writel_relaxed
+#define writeq_u writeq_relaxed
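+
+/* Both families map to the relaxed accessors here; where strong
+ * ordering matters, the code below pairs a write with a read-back of
+ * the same register, e.g.:
+ *
+ *	writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ *	readw_o(&fp->regs->reset);	-- synchronize
+ */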
+
+static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
+static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_KERNEL);
+}
+
+static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
+{
+ unsigned long x, y;
+
+ x = (unsigned long)skb->data;
+ y = ALIGN(x, v);
+
+ skb_reserve(skb, y - x);
+}
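+
+/* A worked example: with v = 512 and skb->data at an address ending
+ * in 0x1f0, y - x = 0x10, so skb_reserve() advances the data pointer
+ * by 16 bytes to the next 512-byte boundary.
+ */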
+
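+/* Bulk copy/clear helpers for adapter packet memory, which permits
+ * word-sized accesses only: sizes are rounded up to a multiple of 4
+ * and data is moved in 64-bit chunks with a 32-bit trailer on 64-bit
+ * hosts.
+ */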
+static inline void fza_reads(const void __iomem *from, void *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 __iomem *src = from;
+ const u32 __iomem *src_trail;
+ u64 *dst = to;
+ u32 *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ *dst++ = readq_u(src++);
+ if (size) {
+ src_trail = (u32 __iomem *)src;
+ dst_trail = (u32 *)dst;
+ *dst_trail = readl_u(src_trail);
+ }
+ } else {
+ const u32 __iomem *src = from;
+ u32 *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ *dst++ = readl_u(src++);
+ }
+}
+
+static inline void fza_writes(const void *from, void __iomem *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 *src = from;
+ const u32 *src_trail;
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(*src++, dst++);
+ if (size) {
+ src_trail = (u32 *)src;
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(*src_trail, dst_trail);
+ }
+ } else {
+ const u32 *src = from;
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(*src++, dst++);
+ }
+}
+
+static inline void fza_moves(const void __iomem *from, void __iomem *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 __iomem *src = from;
+ const u32 __iomem *src_trail;
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(readq_u(src++), dst++);
+ if (size) {
+ src_trail = (u32 __iomem *)src;
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(readl_u(src_trail), dst_trail);
+ }
+ } else {
+ const u32 __iomem *src = from;
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(readl_u(src++), dst++);
+ }
+}
+
+static inline void fza_zeros(void __iomem *to, unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(0, dst++);
+ if (size) {
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(0, dst_trail);
+ }
+ } else {
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(0, dst++);
+ }
+}
+
+static inline void fza_regs_dump(struct fza_private *fp)
+{
+ pr_debug("%s: iomem registers:\n", fp->name);
+ pr_debug(" reset: 0x%04x\n", readw_o(&fp->regs->reset));
+ pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
+ pr_debug(" status: 0x%04x\n", readw_u(&fp->regs->status));
+ pr_debug(" interrupt mask: 0x%04x\n", readw_u(&fp->regs->int_mask));
+ pr_debug(" control A: 0x%04x\n", readw_u(&fp->regs->control_a));
+ pr_debug(" control B: 0x%04x\n", readw_u(&fp->regs->control_b));
+}
+
+static inline void fza_do_reset(struct fza_private *fp)
+{
+ /* Reset the board. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+ readw_o(&fp->regs->reset); /* Read it back for a small delay. */
+ writew_o(FZA_RESET_CLR, &fp->regs->reset);
+
+ /* Enable all interrupt events we handle. */
+ writew_o(fp->int_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+}
+
+static inline void fza_do_shutdown(struct fza_private *fp)
+{
+ /* Disable the driver mode. */
+ writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);
+
+ /* And reset the board. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+ writew_o(FZA_RESET_CLR, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+}
+
+static int fza_reset(struct fza_private *fp)
+{
+ unsigned long flags;
+ uint status, state;
+ long t;
+
+ pr_info("%s: resetting the board...\n", fp->name);
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->state_chg_flag = 0;
+ fza_do_reset(fp);
+ spin_unlock_irqrestore(&fp->lock, flags);
+
+ /* DEC says RESET needs up to 30 seconds to complete. My DEFZA-AA
+ * rev. C03 happily finishes in 9.7 seconds. :-) But we need to
+ * be on the safe side...
+ */
+ t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
+ 45 * HZ);
+ status = readw_u(&fp->regs->status);
+ state = FZA_STATUS_GET_STATE(status);
+ if (fp->state_chg_flag == 0) {
+ pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ if (state != FZA_STATE_UNINITIALIZED) {
+ pr_err("%s: RESET failed!, state %x, failure ID %x\n",
+ fp->name, state, FZA_STATUS_GET_TEST(status));
+ return -EIO;
+ }
+ pr_info("%s: OK\n", fp->name);
+ pr_debug("%s: RESET: %lums elapsed\n", fp->name,
+ (45 * HZ - t) * 1000 / HZ);
+
+ return 0;
+}
+
+static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
+ int command)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
+ unsigned int old_mask, new_mask;
+ union fza_cmd_buf __iomem *buf;
+ struct netdev_hw_addr *ha;
+ int i;
+
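+ /* Temporarily mask the state-change event; its handler also
+ * issues commands (see fza_interrupt()), so this avoids
+ * re-entering the command ring mid-update.
+ */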
+ old_mask = fp->int_mask;
+ new_mask = old_mask & ~FZA_MASK_STATE_CHG;
+ writew_u(new_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+ fp->int_mask = new_mask;
+
+ buf = fp->mmio + readl_u(&ring->buffer);
+
+ if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
+ FZA_RING_OWN_HOST) {
+ pr_warn("%s: command buffer full, command: %u!\n", fp->name,
+ command);
+ return NULL;
+ }
+
+ switch (command) {
+ case FZA_RING_CMD_INIT:
+ writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
+ writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
+ fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
+ break;
+
+ case FZA_RING_CMD_MODCAM:
+ i = 0;
+ fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i >= FZA_CMD_CAM_SIZE)
+ break;
+ fza_writes(ha->addr, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ }
+ while (i < FZA_CMD_CAM_SIZE)
+ fza_zeros(&buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ break;
+
+ case FZA_RING_CMD_PARAM:
+ writel_u(loopback, &buf->param.loop_mode);
+ writel_u(fp->t_max, &buf->param.t_max);
+ writel_u(fp->t_req, &buf->param.t_req);
+ writel_u(fp->tvx, &buf->param.tvx);
+ writel_u(fp->lem_threshold, &buf->param.lem_threshold);
+ fza_writes(&fp->station_id, &buf->param.station_id,
+ sizeof(buf->param.station_id));
+ /* Convert to milliseconds due to buggy firmware; the parameter
+ * is presumably kept in 80ns units and 12500 * 80ns = 1ms.
+ */
+ writel_u(fp->rtoken_timeout / 12500,
+ &buf->param.rtoken_timeout);
+ writel_u(fp->ring_purger, &buf->param.ring_purger);
+ break;
+
+ case FZA_RING_CMD_MODPROM:
+ if (dev->flags & IFF_PROMISC) {
+ writel_u(1, &buf->modprom.llc_prom);
+ writel_u(1, &buf->modprom.smt_prom);
+ } else {
+ writel_u(0, &buf->modprom.llc_prom);
+ writel_u(0, &buf->modprom.smt_prom);
+ }
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
+ writel_u(1, &buf->modprom.llc_multi);
+ else
+ writel_u(0, &buf->modprom.llc_multi);
+ writel_u(1, &buf->modprom.llc_bcast);
+ break;
+ }
+
+ /* Trigger the command. */
+ writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
+ writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);
+
+ fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;
+
+ fp->int_mask = old_mask;
+ writew_u(fp->int_mask, &fp->regs->int_mask);
+
+ return ring;
+}
+
+static int fza_init_send(struct net_device *dev,
+ struct fza_cmd_init *__iomem *init)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring;
+ unsigned long flags;
+ u32 stat;
+ long t;
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->cmd_done_flag = 0;
+ ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
+ spin_unlock_irqrestore(&fp->lock, flags);
+ if (!ring)
+ /* This should never happen in the uninitialized state,
+ * so do not try to recover and just consider it fatal.
+ */
+ return -ENOBUFS;
+
+ /* INIT may take quite a long time (160ms for my C03). */
+ t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
+ if (fp->cmd_done_flag == 0) {
+ pr_err("%s: INIT command timed out!, state %x\n", fp->name,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ stat = readl_u(&ring->stat);
+ if (stat != FZA_RING_STAT_SUCCESS) {
+ pr_err("%s: INIT command failed!, status %02x, state %x\n",
+ fp->name, stat,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ pr_debug("%s: INIT: %lums elapsed\n", fp->name,
+ (3 * HZ - t) * 1000 / HZ);
+
+ if (init)
+ *init = fp->mmio + readl_u(&ring->buffer);
+ return 0;
+}
+
+static void fza_rx_init(struct fza_private *fp)
+{
+ int i;
+
+ /* Fill the host receive descriptor ring. */
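+ /* Buffer addresses are handed to the adapter 512-byte-aligned,
+ * encoded as address >> 9; buffer 1 is placed 4KiB (0x1000)
+ * past buffer 0.
+ */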
+ for (i = 0; i < FZA_RING_RX_SIZE; i++) {
+ writel_o(0, &fp->ring_hst_rx[i].rmc);
+ writel_o((fp->rx_dma[i] + 0x1000) >> 9,
+ &fp->ring_hst_rx[i].buffer1);
+ writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
+ &fp->ring_hst_rx[i].buf0_own);
+ }
+}
+
+static void fza_set_rx_mode(struct net_device *dev)
+{
+ fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
+ fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
+}
+
+union fza_buffer_txp {
+ struct fza_buffer_tx *data_ptr;
+ struct fza_buffer_tx __iomem *mmio_ptr;
+};
+
+static int fza_do_xmit(union fza_buffer_txp ub, int len,
+ struct net_device *dev, int smt)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *rmc_tx_ptr;
+ int i, first, frag_len, left_len;
+ u32 own, rmc;
+
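+ /* Free descriptors are those between the cleanup index
+ * (ring_rmc_txd_index) and the fill index, counted modulo the
+ * ring size; one descriptor is kept back so a completely full
+ * ring is not mistaken for an empty one.
+ */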
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) < len)
+ return 1;
+
+ first = fp->ring_rmc_tx_index;
+
+ left_len = len;
+ frag_len = FZA_TX_BUFFER_SIZE;
+ /* First descriptor is relinquished last. */
+ own = FZA_RING_TX_OWN_HOST;
+ /* First descriptor carries frame length; we don't use cut-through. */
+ rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
+ do {
+ i = fp->ring_rmc_tx_index;
+ rmc_tx_ptr = &fp->buffer_tx[i];
+
+ if (left_len < FZA_TX_BUFFER_SIZE)
+ frag_len = left_len;
+ left_len -= frag_len;
+
+ /* Length must be a multiple of 4 as only word writes are
+ * permitted!
+ */
+ frag_len = (frag_len + 3) & ~3;
+ if (smt)
+ fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
+ else
+ fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);
+
+ if (left_len == 0)
+ rmc |= FZA_RING_TX_EOP; /* Mark last frag. */
+
+ writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
+ writel_o(own, &fp->ring_rmc_tx[i].own);
+
+ ub.data_ptr++;
+ fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
+ fp->ring_rmc_tx_size;
+
+ /* Settings for intermediate frags. */
+ own = FZA_RING_TX_OWN_RMC;
+ rmc = 0;
+ } while (left_len > 0);
+
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ }
+
+ writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);
+
+ /* Go, go, go! */
+ writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);
+
+ return 0;
+}
+
+static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
+ u32 rmc, struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *smt_rx_ptr;
+ u32 own;
+ int i;
+
+ i = fp->ring_smt_rx_index;
+ own = readl_o(&fp->ring_smt_rx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ return 1;
+
+ smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);
+
+ /* Length must be a multiple of 4 as only word writes are permitted! */
+ fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);
+
+ writel_o(rmc, &fp->ring_smt_rx[i].rmc);
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);
+
+ fp->ring_smt_rx_index =
+ (fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;
+
+ /* Grab it! */
+ writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);
+
+ return 0;
+}
+
+static void fza_tx(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own, rmc;
+ int i;
+
+ while (1) {
+ i = fp->ring_rmc_txd_index;
+ if (i == fp->ring_rmc_tx_index)
+ break;
+ own = readl_o(&fp->ring_rmc_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
+ break;
+
+ rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
+ /* Only process the first descriptor. */
+ if ((rmc & FZA_RING_TX_SOP) != 0) {
+ if ((rmc & FZA_RING_TX_DCC_MASK) ==
+ FZA_RING_TX_DCC_SUCCESS) {
+ int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
+ /* Omit PRH. */
+
+ fp->stats.tx_packets++;
+ fp->stats.tx_bytes += pkt_len;
+ } else {
+ fp->stats.tx_errors++;
+ switch (rmc & FZA_RING_TX_DCC_MASK) {
+ case FZA_RING_TX_DCC_DTP_SOP:
+ case FZA_RING_TX_DCC_DTP:
+ case FZA_RING_TX_DCC_ABORT:
+ fp->stats.tx_aborted_errors++;
+ break;
+ case FZA_RING_TX_DCC_UNDRRUN:
+ fp->stats.tx_fifo_errors++;
+ break;
+ case FZA_RING_TX_DCC_PARITY:
+ default:
+ break;
+ }
+ }
+ }
+
+ fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
+ fp->ring_rmc_tx_size;
+ }
+
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
+ if (fp->queue_active) {
+ netif_wake_queue(dev);
+ pr_debug("%s: queue woken\n", fp->name);
+ }
+ }
+}
+
+static inline int fza_rx_err(struct fza_private *fp,
+ const u32 rmc, const u8 fc)
+{
+ int len, min_len, max_len;
+
+ len = rmc & FZA_RING_PBC_MASK;
+
+ if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
+ fp->stats.rx_errors++;
+
+ /* Check special status codes. */
+ if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
+ FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
+ (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
+ FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
+ if (len >= 8190)
+ fp->stats.rx_length_errors++;
+ return 1;
+ }
+ if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
+ FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
+ (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
+ FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
+ /* Halt the interface to trigger a reset. */
+ writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ return 1;
+ }
+
+ /* Check the MAC status. */
+ switch (rmc & FZA_RING_RX_RRR_MASK) {
+ case FZA_RING_RX_RRR_OK:
+ if ((rmc & FZA_RING_RX_CRC) != 0)
+ fp->stats.rx_crc_errors++;
+ else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
+ (rmc & FZA_RING_RX_FSB_ERR) != 0)
+ fp->stats.rx_frame_errors++;
+ return 1;
+ case FZA_RING_RX_RRR_SADDR:
+ case FZA_RING_RX_RRR_DADDR:
+ case FZA_RING_RX_RRR_ABORT:
+ /* Halt the interface to trigger a reset. */
+ writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ return 1;
+ case FZA_RING_RX_RRR_LENGTH:
+ fp->stats.rx_frame_errors++;
+ return 1;
+ default:
+ return 1;
+ }
+ }
+
+ /* Packet received successfully; validate the length. */
+ switch (fc & FDDI_FC_K_FORMAT_MASK) {
+ case FDDI_FC_K_FORMAT_MANAGEMENT:
+ if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
+ min_len = 37;
+ else
+ min_len = 17;
+ break;
+ case FDDI_FC_K_FORMAT_LLC:
+ min_len = 20;
+ break;
+ default:
+ min_len = 17;
+ break;
+ }
+ max_len = 4495;
+ if (len < min_len || len > max_len) {
+ fp->stats.rx_errors++;
+ fp->stats.rx_length_errors++;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void fza_rx(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct sk_buff *skb, *newskb;
+ struct fza_fddihdr *frame;
+ dma_addr_t dma, newdma;
+ u32 own, rmc, buf;
+ int i, len;
+ u8 fc;
+
+ while (1) {
+ i = fp->ring_hst_rx_index;
+ own = readl_o(&fp->ring_hst_rx[i].buf0_own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ rmc = readl_u(&fp->ring_hst_rx[i].rmc);
+ skb = fp->rx_skbuff[i];
+ dma = fp->rx_dma[i];
+
+ /* The RMC doesn't count the preamble and the starting
+ * delimiter. We fix it up here for a total of 3 octets.
+ */
+ dma_rmb();
+ len = (rmc & FZA_RING_PBC_MASK) + 3;
+ frame = (struct fza_fddihdr *)skb->data;
+
+ /* We need to get at real FC. */
+ dma_sync_single_for_cpu(fp->bdev,
+ dma +
+ ((u8 *)&frame->hdr.fc - (u8 *)frame),
+ sizeof(frame->hdr.fc),
+ DMA_FROM_DEVICE);
+ fc = frame->hdr.fc;
+
+ if (fza_rx_err(fp, rmc, fc))
+ goto err_rx;
+
+ /* We have to 512-byte-align RX buffers... */
+ newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
+ if (newskb) {
+ fza_skb_align(newskb, 512);
+ newdma = dma_map_single(fp->bdev, newskb->data,
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fp->bdev, newdma)) {
+ dev_kfree_skb_irq(newskb);
+ newskb = NULL;
+ }
+ }
+ if (newskb) {
+ int pkt_len = len - 7; /* Omit P, SD and FCS. */
+ int is_multi;
+ int rx_stat;
+
+ dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* Queue SMT frames to the SMT receive ring. */
+ if ((fc & (FDDI_FC_K_CLASS_MASK |
+ FDDI_FC_K_FORMAT_MASK)) ==
+ (FDDI_FC_K_CLASS_ASYNC |
+ FDDI_FC_K_FORMAT_MANAGEMENT) &&
+ (rmc & FZA_RING_RX_DA_MASK) !=
+ FZA_RING_RX_DA_PROM) {
+ if (fza_do_recv_smt((struct fza_buffer_tx *)
+ skb->data, len, rmc,
+ dev)) {
+ writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
+ &fp->regs->control_a);
+ }
+ }
+
+ is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);
+
+ skb_reserve(skb, 3); /* Skip over P and SD. */
+ skb_put(skb, pkt_len); /* And cut off FCS. */
+ skb->protocol = fddi_type_trans(skb, dev);
+
+ rx_stat = netif_rx(skb);
+ if (rx_stat != NET_RX_DROP) {
+ fp->stats.rx_packets++;
+ fp->stats.rx_bytes += pkt_len;
+ if (is_multi)
+ fp->stats.multicast++;
+ } else {
+ fp->stats.rx_dropped++;
+ }
+
+ skb = newskb;
+ dma = newdma;
+ fp->rx_skbuff[i] = skb;
+ fp->rx_dma[i] = dma;
+ } else {
+ fp->stats.rx_dropped++;
+ pr_notice("%s: memory squeeze, dropping packet\n",
+ fp->name);
+ }
+
+err_rx:
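+ /* Re-arm the descriptor: clear the RMC word, rewrite both
+ * buffer pointers and hand ownership back to the adapter.
+ */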
+ writel_o(0, &fp->ring_hst_rx[i].rmc);
+ buf = (dma + 0x1000) >> 9;
+ writel_o(buf, &fp->ring_hst_rx[i].buffer1);
+ buf = dma >> 9 | FZA_RING_OWN_FZA;
+ writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
+ fp->ring_hst_rx_index =
+ (fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
+ }
+}
+
+static void fza_tx_smt(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+ int i, len;
+ u32 own;
+
+ while (1) {
+ i = fp->ring_smt_tx_index;
+ own = readl_o(&fp->ring_smt_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
+ len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;
+
+ if (!netif_queue_stopped(dev)) {
+ if (dev_nit_active(dev)) {
+ struct sk_buff *skb;
+
+ /* Length must be a multiple of 4 as only word
+ * reads are permitted!
+ */
+ skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
+ if (!skb)
+ goto err_no_skb; /* Drop. */
+
+ skb_data_ptr = (struct fza_buffer_tx *)
+ skb->data;
+
+ fza_reads(smt_tx_ptr, skb_data_ptr,
+ (len + 3) & ~3);
+ skb->dev = dev;
+ skb_reserve(skb, 3); /* Skip over PRH. */
+ skb_put(skb, len - 3);
+ skb_reset_network_header(skb);
+
+ dev_queue_xmit_nit(skb, dev);
+
+ dev_kfree_skb_irq(skb);
+
+err_no_skb:
+ ;
+ }
+
+ /* Queue the frame to the RMC transmit ring. */
+ fza_do_xmit((union fza_buffer_txp)
+ { .mmio_ptr = smt_tx_ptr },
+ len, dev, 1);
+ }
+
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
+ fp->ring_smt_tx_index =
+ (fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
+ }
+}
+
+static void fza_uns(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own;
+ int i;
+
+ while (1) {
+ i = fp->ring_uns_index;
+ own = readl_o(&fp->ring_uns[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
+ fp->stats.rx_errors++;
+ fp->stats.rx_over_errors++;
+ }
+
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
+ fp->ring_uns_index =
+ (fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
+ }
+}
+
+static void fza_tx_flush(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own;
+ int i;
+
+ /* Clean up the SMT TX ring. */
+ i = fp->ring_smt_tx_index;
+ do {
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
+ fp->ring_smt_tx_index =
+ (fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
+
+ } while (i != fp->ring_smt_tx_index);
+
+ /* Clean up the RMC TX ring. */
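+ /* Descriptors still owned by the RMC get FZA_RING_TX_DTP set
+ * so that the RMC discards them instead of transmitting (cf.
+ * the DCC_DTP completion codes counted in fza_tx()).
+ */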
+ i = fp->ring_rmc_tx_index;
+ do {
+ own = readl_o(&fp->ring_rmc_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
+ u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
+
+ writel_u(rmc | FZA_RING_TX_DTP,
+ &fp->ring_rmc_tx[i].rmc);
+ }
+ fp->ring_rmc_tx_index =
+ (fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;
+
+ } while (i != fp->ring_rmc_tx_index);
+
+ /* Done. */
+ writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
+}
+
+static irqreturn_t fza_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct fza_private *fp = netdev_priv(dev);
+ uint int_event;
+
+ /* Get interrupt events. */
+ int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
+ if (int_event == 0)
+ return IRQ_NONE;
+
+ /* Clear the events. */
+ writew_u(int_event, &fp->regs->int_event);
+
+ /* Now handle the events. The order matters. */
+
+ /* Command finished interrupt. */
+ if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
+ fp->irq_count_cmd_done++;
+
+ spin_lock(&fp->lock);
+ fp->cmd_done_flag = 1;
+ wake_up(&fp->cmd_done_wait);
+ spin_unlock(&fp->lock);
+ }
+
+ /* Transmit finished interrupt. */
+ if ((int_event & FZA_EVENT_TX_DONE) != 0) {
+ fp->irq_count_tx_done++;
+ fza_tx(dev);
+ }
+
+ /* Host receive interrupt. */
+ if ((int_event & FZA_EVENT_RX_POLL) != 0) {
+ fp->irq_count_rx_poll++;
+ fza_rx(dev);
+ }
+
+ /* SMT transmit interrupt. */
+ if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
+ fp->irq_count_smt_tx_poll++;
+ fza_tx_smt(dev);
+ }
+
+ /* Transmit ring flush request. */
+ if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
+ fp->irq_count_flush_tx++;
+ fza_tx_flush(dev);
+ }
+
+ /* Link status change interrupt. */
+ if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
+ uint status;
+
+ fp->irq_count_link_st_chg++;
+ status = readw_u(&fp->regs->status);
+ if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
+ netif_carrier_on(dev);
+ pr_info("%s: link available\n", fp->name);
+ } else {
+ netif_carrier_off(dev);
+ pr_info("%s: link unavailable\n", fp->name);
+ }
+ }
+
+ /* Unsolicited event interrupt. */
+ if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
+ fp->irq_count_uns_poll++;
+ fza_uns(dev);
+ }
+
+ /* State change interrupt. */
+ if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
+ uint status, state;
+
+ fp->irq_count_state_chg++;
+
+ status = readw_u(&fp->regs->status);
+ state = FZA_STATUS_GET_STATE(status);
+ pr_debug("%s: state change: %x\n", fp->name, state);
+ switch (state) {
+ case FZA_STATE_RESET:
+ break;
+
+ case FZA_STATE_UNINITIALIZED:
+ netif_carrier_off(dev);
+ del_timer_sync(&fp->reset_timer);
+ fp->ring_cmd_index = 0;
+ fp->ring_uns_index = 0;
+ fp->ring_rmc_tx_index = 0;
+ fp->ring_rmc_txd_index = 0;
+ fp->ring_hst_rx_index = 0;
+ fp->ring_smt_tx_index = 0;
+ fp->ring_smt_rx_index = 0;
+ if (fp->state > state) {
+ pr_info("%s: OK\n", fp->name);
+ fza_cmd_send(dev, FZA_RING_CMD_INIT);
+ }
+ break;
+
+ case FZA_STATE_INITIALIZED:
+ if (fp->state > state) {
+ fza_set_rx_mode(dev);
+ fza_cmd_send(dev, FZA_RING_CMD_PARAM);
+ }
+ break;
+
+ case FZA_STATE_RUNNING:
+ case FZA_STATE_MAINTENANCE:
+ fp->state = state;
+ fza_rx_init(fp);
+ fp->queue_active = 1;
+ netif_wake_queue(dev);
+ pr_debug("%s: queue woken\n", fp->name);
+ break;
+
+ case FZA_STATE_HALTED:
+ fp->queue_active = 0;
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ del_timer_sync(&fp->reset_timer);
+ pr_warn("%s: halted, reason: %x\n", fp->name,
+ FZA_STATUS_GET_HALT(status));
+ fza_regs_dump(fp);
+ pr_info("%s: resetting the board...\n", fp->name);
+ fza_do_reset(fp);
+ fp->timer_state = 0;
+ fp->reset_timer.expires = jiffies + 45 * HZ;
+ add_timer(&fp->reset_timer);
+ break;
+
+ default:
+ pr_warn("%s: undefined state: %x\n", fp->name, state);
+ break;
+ }
+
+ spin_lock(&fp->lock);
+ fp->state_chg_flag = 1;
+ wake_up(&fp->state_chg_wait);
+ spin_unlock(&fp->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void fza_reset_timer(struct timer_list *t)
+{
+ struct fza_private *fp = from_timer(fp, t, reset_timer);
+
+ if (!fp->timer_state) {
+ pr_err("%s: RESET timed out!\n", fp->name);
+ pr_info("%s: trying harder...\n", fp->name);
+
+ /* Assert the board reset. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+
+ fp->timer_state = 1;
+ fp->reset_timer.expires = jiffies + HZ;
+ } else {
+ /* Clear the board reset. */
+ writew_u(FZA_RESET_CLR, &fp->regs->reset);
+
+ /* Enable all interrupt events we handle. */
+ writew_o(fp->int_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+
+ fp->timer_state = 0;
+ fp->reset_timer.expires = jiffies + 45 * HZ;
+ }
+ add_timer(&fp->reset_timer);
+}
+
+static int fza_set_mac_address(struct net_device *dev, void *addr)
+{
+ return -EOPNOTSUPP;
+}
+
+static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ unsigned int old_mask, new_mask;
+ int ret;
+ u8 fc;
+
+ skb_push(skb, 3); /* Make room for PRH. */
+
+ /* Decode FC to set PRH. */
+ fc = skb->data[3];
+ skb->data[0] = 0;
+ skb->data[1] = 0;
+ skb->data[2] = FZA_PRH2_NORMAL;
+ if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
+ skb->data[0] |= FZA_PRH0_FRAME_SYNC;
+ switch (fc & FDDI_FC_K_FORMAT_MASK) {
+ case FDDI_FC_K_FORMAT_MANAGEMENT:
+ if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
+ /* Token. */
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
+ } else {
+ /* SMT or MAC. */
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
+ }
+ skb->data[1] |= FZA_PRH1_CRC_NORMAL;
+ break;
+ case FDDI_FC_K_FORMAT_LLC:
+ case FDDI_FC_K_FORMAT_FUTURE:
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
+ break;
+ case FDDI_FC_K_FORMAT_IMPLEMENTOR:
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
+ break;
+ }
+
+ /* SMT transmit interrupts may sneak frames into the RMC
+ * transmit ring. We disable them while queueing a frame
+ * to maintain consistency.
+ */
+ old_mask = fp->int_mask;
+ new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
+ writew_u(new_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+ fp->int_mask = new_mask;
+ ret = fza_do_xmit((union fza_buffer_txp)
+ { .data_ptr = (struct fza_buffer_tx *)skb->data },
+ skb->len, dev, 0);
+ fp->int_mask = old_mask;
+ writew_u(fp->int_mask, &fp->regs->int_mask);
+
+ if (ret) {
+ /* Probably an SMT packet filled the remaining space,
+ * so just stop the queue, but don't report it as an error.
+ */
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ fp->stats.tx_dropped++;
+ }
+
+ dev_kfree_skb(skb);
+
+ /* The skb has been consumed either way, so report success. */
+ return NETDEV_TX_OK;
+}
+
+static int fza_open(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring;
+ struct sk_buff *skb;
+ unsigned long flags;
+ dma_addr_t dma;
+ int ret, i;
+ u32 stat;
+ long t;
+
+ for (i = 0; i < FZA_RING_RX_SIZE; i++) {
+ /* We have to 512-byte-align RX buffers... */
+ skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
+ if (skb) {
+ fza_skb_align(skb, 512);
+ dma = dma_map_single(fp->bdev, skb->data,
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fp->bdev, dma)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+ if (!skb) {
+ for (--i; i >= 0; i--) {
+ dma_unmap_single(fp->bdev, fp->rx_dma[i],
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(fp->rx_skbuff[i]);
+ fp->rx_dma[i] = 0;
+ fp->rx_skbuff[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ fp->rx_skbuff[i] = skb;
+ fp->rx_dma[i] = dma;
+ }
+
+ ret = fza_init_send(dev, NULL);
+ if (ret != 0)
+ return ret;
+
+ /* Purger and Beacon multicasts need to be supplied before PARAM. */
+ fza_set_rx_mode(dev);
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->cmd_done_flag = 0;
+ ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
+ spin_unlock_irqrestore(&fp->lock, flags);
+ if (!ring)
+ return -ENOBUFS;
+
+ t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
+ if (fp->cmd_done_flag == 0) {
+ pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ stat = readl_u(&ring->stat);
+ if (stat != FZA_RING_STAT_SUCCESS) {
+ pr_err("%s: PARAM command failed!, status %02x, state %x\n",
+ fp->name, stat,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
+ (3 * HZ - t) * 1000 / HZ);
+
+ return 0;
+}
+
+static int fza_close(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ unsigned long flags;
+ uint state;
+ long t;
+ int i;
+
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+
+ del_timer_sync(&fp->reset_timer);
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->state = FZA_STATE_UNINITIALIZED;
+ fp->state_chg_flag = 0;
+ /* Shut the interface down. */
+ writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ spin_unlock_irqrestore(&fp->lock, flags);
+
+ /* DEC says SHUT needs up to 10 seconds to complete. */
+ t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
+ 15 * HZ);
+ state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
+ if (fp->state_chg_flag == 0) {
+ pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ if (state != FZA_STATE_UNINITIALIZED) {
+ pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
+ (15 * HZ - t) * 1000 / HZ);
+
+ for (i = 0; i < FZA_RING_RX_SIZE; i++)
+ if (fp->rx_skbuff[i]) {
+ dma_unmap_single(fp->bdev, fp->rx_dma[i],
+ FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(fp->rx_skbuff[i]);
+ fp->rx_dma[i] = 0;
+ fp->rx_skbuff[i] = NULL;
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *fza_get_stats(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+
+ return &fp->stats;
+}
+
+static int fza_probe(struct device *bdev)
+{
+ static const struct net_device_ops netdev_ops = {
+ .ndo_open = fza_open,
+ .ndo_stop = fza_close,
+ .ndo_start_xmit = fza_start_xmit,
+ .ndo_set_rx_mode = fza_set_rx_mode,
+ .ndo_set_mac_address = fza_set_mac_address,
+ .ndo_get_stats = fza_get_stats,
+ };
+ static int version_printed;
+ char rom_rev[4], fw_rev[4], rmc_rev[4];
+ struct tc_dev *tdev = to_tc_dev(bdev);
+ struct fza_cmd_init __iomem *init;
+ resource_size_t start, len;
+ struct net_device *dev;
+ struct fza_private *fp;
+ uint smt_ver, pmd_type;
+ void __iomem *mmio;
+ uint hw_addr[2];
+ int ret, i;
+
+ if (!version_printed) {
+ pr_info("%s", version);
+ version_printed = 1;
+ }
+
+ dev = alloc_fddidev(sizeof(*fp));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, bdev);
+
+ fp = netdev_priv(dev);
+ dev_set_drvdata(bdev, dev);
+
+ fp->bdev = bdev;
+ fp->name = dev_name(bdev);
+
+ /* Request the I/O MEM resource. */
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ if (!request_mem_region(start, len, dev_name(bdev))) {
+ pr_err("%s: cannot reserve MMIO region\n", fp->name);
+ ret = -EBUSY;
+ goto err_out_kfree;
+ }
+
+ /* MMIO mapping setup. */
+ mmio = ioremap_nocache(start, len);
+ if (!mmio) {
+ pr_err("%s: cannot map MMIO\n", fp->name);
+ ret = -ENOMEM;
+ goto err_out_resource;
+ }
+
+ /* Initialize the new device structure. */
+ switch (loopback) {
+ case FZA_LOOP_NORMAL:
+ case FZA_LOOP_INTERN:
+ case FZA_LOOP_EXTERN:
+ break;
+ default:
+ loopback = FZA_LOOP_NORMAL;
+ }
+
+ fp->mmio = mmio;
+ dev->irq = tdev->interrupt;
+
+ pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
+ fp->name, (long long)tdev->resource.start, dev->irq);
+ pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);
+
+ fp->regs = mmio + FZA_REG_BASE;
+ fp->ring_cmd = mmio + FZA_RING_CMD;
+ fp->ring_uns = mmio + FZA_RING_UNS;
+
+ init_waitqueue_head(&fp->state_chg_wait);
+ init_waitqueue_head(&fp->cmd_done_wait);
+ spin_lock_init(&fp->lock);
+ fp->int_mask = FZA_MASK_NORMAL;
+
+ timer_setup(&fp->reset_timer, fza_reset_timer, 0);
+
+ /* Sanitize the board. */
+ fza_regs_dump(fp);
+ fza_do_shutdown(fp);
+
+ ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
+ if (ret != 0) {
+ pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
+ goto err_out_map;
+ }
+
+ /* Enable the driver mode. */
+ writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);
+
+ /* For some reason transmit done interrupts can trigger during
+ * reset. This avoids a division error in the handler.
+ */
+ fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;
+
+ ret = fza_reset(fp);
+ if (ret != 0)
+ goto err_out_irq;
+
+ ret = fza_init_send(dev, &init);
+ if (ret != 0)
+ goto err_out_irq;
+
+ fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
+ memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);
+
+ fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
+ fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
+ fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
+ for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
+ rom_rev[i] = 0;
+ for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
+ fw_rev[i] = 0;
+ for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
+ rmc_rev[i] = 0;
+
+ fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
+ fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
+ fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
+ fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
+ fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
+ fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
+ fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
+ fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);
+
+ fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));
+
+ fp->t_max = readl_u(&init->def_t_max);
+ fp->t_req = readl_u(&init->def_t_req);
+ fp->tvx = readl_u(&init->def_tvx);
+ fp->lem_threshold = readl_u(&init->lem_threshold);
+ fza_reads(&init->def_station_id, &fp->station_id,
+ sizeof(fp->station_id));
+ fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
+ fp->ring_purger = readl_u(&init->ring_purger);
+
+ smt_ver = readl_u(&init->smt_ver);
+ pmd_type = readl_u(&init->pmd_type);
+
+ pr_debug("%s: INIT parameters:\n", fp->name);
+ pr_debug(" tx_mode: %u\n", readl_u(&init->tx_mode));
+ pr_debug(" hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
+ pr_debug(" rmc_rev: %.4s\n", rmc_rev);
+ pr_debug(" rom_rev: %.4s\n", rom_rev);
+ pr_debug(" fw_rev: %.4s\n", fw_rev);
+ pr_debug(" mop_type: %u\n", readl_u(&init->mop_type));
+ pr_debug(" hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
+ pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
+ pr_debug(" rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
+ pr_debug(" smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
+ pr_debug(" smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
+ pr_debug(" smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
+ pr_debug(" smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
+ /* TC systems are always LE, so don't bother swapping. */
+ pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ (readl_u(&init->hw_addr[0]) >> 0) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 8) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 16) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 24) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 0) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 8) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 16) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 24) & 0xff);
+ pr_debug(" def_t_req: %u\n", readl_u(&init->def_t_req));
+ pr_debug(" def_tvx: %u\n", readl_u(&init->def_tvx));
+ pr_debug(" def_t_max: %u\n", readl_u(&init->def_t_max));
+ pr_debug(" lem_threshold: %u\n", readl_u(&init->lem_threshold));
+ /* Don't bother swapping, see above. */
+ pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ (readl_u(&init->def_station_id[0]) >> 0) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 8) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 16) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 24) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 0) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 8) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 16) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 24) & 0xff);
+ pr_debug(" pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
+ pr_debug(" smt_ver: %u\n", readl_u(&init->smt_ver));
+ pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
+ pr_debug(" ring_purger: %u\n", readl_u(&init->ring_purger));
+ pr_debug(" smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
+ pr_debug(" smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
+ pr_debug(" pmd_type: %u\n", readl_u(&init->pmd_type));
+
+ pr_info("%s: model %s, address %pMF\n",
+ fp->name,
+ pmd_type == FZA_PMD_TYPE_TW ?
+ "700-C (DEFZA-CA), ThinWire PMD selected" :
+ pmd_type == FZA_PMD_TYPE_STP ?
+ "700-C (DEFZA-CA), STP PMD selected" :
+ "700 (DEFZA-AA), MMF PMD",
+ dev->dev_addr);
+ pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
+ "SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);
+
+ /* Now that we have fetched the initial parameters, shut the
+ * interface down until it is opened.
+ */
+ ret = fza_close(dev);
+ if (ret != 0)
+ goto err_out_irq;
+
+ /* The FZA-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+
+ ret = register_netdev(dev);
+ if (ret != 0)
+ goto err_out_irq;
+
+ pr_info("%s: registered as %s\n", fp->name, dev->name);
+ fp->name = (const char *)dev->name;
+
+ get_device(bdev);
+ return 0;
+
+err_out_irq:
+ del_timer_sync(&fp->reset_timer);
+ fza_do_shutdown(fp);
+ free_irq(dev->irq, dev);
+
+err_out_map:
+ iounmap(mmio);
+
+err_out_resource:
+ release_mem_region(start, len);
+
+err_out_kfree:
+ /* Report before free_netdev(): fp lives in the netdev's private
+ * area and must not be referenced once the device is freed.
+ */
+ pr_err("%s: initialization failure, aborting!\n", fp->name);
+ free_netdev(dev);
+
+ return ret;
+}
+
+static int fza_remove(struct device *bdev)
+{
+ struct net_device *dev = dev_get_drvdata(bdev);
+ struct fza_private *fp = netdev_priv(dev);
+ struct tc_dev *tdev = to_tc_dev(bdev);
+ resource_size_t start, len;
+
+ put_device(bdev);
+
+ unregister_netdev(dev);
+
+ del_timer_sync(&fp->reset_timer);
+ fza_do_shutdown(fp);
+ free_irq(dev->irq, dev);
+
+ iounmap(fp->mmio);
+
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ release_mem_region(start, len);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct tc_device_id const fza_tc_table[] = {
+ { "DEC ", "PMAF-AA " },
+ { }
+};
+MODULE_DEVICE_TABLE(tc, fza_tc_table);
+
+static struct tc_driver fza_driver = {
+ .id_table = fza_tc_table,
+ .driver = {
+ .name = "defza",
+ .bus = &tc_bus_type,
+ .probe = fza_probe,
+ .remove = fza_remove,
+ },
+};
+
+static int fza_init(void)
+{
+ return tc_register_driver(&fza_driver);
+}
+
+static void fza_exit(void)
+{
+ tc_unregister_driver(&fza_driver);
+}
+
+module_init(fza_init);
+module_exit(fza_exit);
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h
new file mode 100644
index 000000000000..b06acf32738e
--- /dev/null
+++ b/drivers/net/fddi/defza.h
@@ -0,0 +1,791 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * Dave Sawyer & Phil Weeks & Frank Itkowsky,
+ * "DEC FDDIcontroller 700 Port Specification",
+ * Revision 1.1, Digital Equipment Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/if_fddi.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+/* IOmem register offsets. */
+#define FZA_REG_BASE 0x100000 /* register base address */
+#define FZA_REG_RESET 0x100200 /* reset, r/w */
+#define FZA_REG_INT_EVENT 0x100400 /* interrupt event, r/w1c */
+#define FZA_REG_STATUS 0x100402 /* status, r/o */
+#define FZA_REG_INT_MASK 0x100404 /* interrupt mask, r/w */
+#define FZA_REG_CONTROL_A 0x100500 /* control A, r/w1s */
+#define FZA_REG_CONTROL_B 0x100502 /* control B, r/w */
+
+/* Reset register constants. Bits 1:0 are r/w, others are fixed at 0. */
+#define FZA_RESET_DLU 0x0002 /* OR with INIT to blast flash memory */
+#define FZA_RESET_INIT 0x0001 /* switch into the reset state */
+#define FZA_RESET_CLR 0x0000 /* run self-test and return to work */
+
+/* Interrupt event register constants. All bits are r/w1c. */
+#define FZA_EVENT_DLU_DONE 0x0800 /* flash memory write complete */
+#define FZA_EVENT_FLUSH_TX 0x0400 /* transmit ring flush request */
+#define FZA_EVENT_PM_PARITY_ERR 0x0200 /* onboard packet memory parity err */
+#define FZA_EVENT_HB_PARITY_ERR 0x0100 /* host bus parity error */
+#define FZA_EVENT_NXM_ERR 0x0080 /* non-existent memory access error;
+ * also raised for unaligned and
+ * unsupported partial-word accesses
+ */
+#define FZA_EVENT_LINK_ST_CHG 0x0040 /* link status change */
+#define FZA_EVENT_STATE_CHG 0x0020 /* adapter state change */
+#define FZA_EVENT_UNS_POLL 0x0010 /* unsolicited event service request */
+#define FZA_EVENT_CMD_DONE 0x0008 /* command done ack */
+#define FZA_EVENT_SMT_TX_POLL 0x0004 /* SMT frame transmit request */
+#define FZA_EVENT_RX_POLL 0x0002 /* receive request (packet avail.) */
+#define FZA_EVENT_TX_DONE 0x0001 /* RMC transmit done ack */
+
+/* Status register constants. All bits are r/o. */
+#define FZA_STATUS_DLU_SHIFT 0xc /* down line upgrade status bits */
+#define FZA_STATUS_DLU_MASK 0x03
+#define FZA_STATUS_LINK_SHIFT 0xb /* link status bits */
+#define FZA_STATUS_LINK_MASK 0x01
+#define FZA_STATUS_STATE_SHIFT 0x8 /* adapter state bits */
+#define FZA_STATUS_STATE_MASK 0x07
+#define FZA_STATUS_HALT_SHIFT 0x0 /* halt reason bits */
+#define FZA_STATUS_HALT_MASK 0xff
+#define FZA_STATUS_TEST_SHIFT 0x0 /* test failure bits */
+#define FZA_STATUS_TEST_MASK 0xff
+
+#define FZA_STATUS_GET_DLU(x) (((x) >> FZA_STATUS_DLU_SHIFT) & \
+ FZA_STATUS_DLU_MASK)
+#define FZA_STATUS_GET_LINK(x) (((x) >> FZA_STATUS_LINK_SHIFT) & \
+ FZA_STATUS_LINK_MASK)
+#define FZA_STATUS_GET_STATE(x) (((x) >> FZA_STATUS_STATE_SHIFT) & \
+ FZA_STATUS_STATE_MASK)
+#define FZA_STATUS_GET_HALT(x) (((x) >> FZA_STATUS_HALT_SHIFT) & \
+ FZA_STATUS_HALT_MASK)
+#define FZA_STATUS_GET_TEST(x) (((x) >> FZA_STATUS_TEST_SHIFT) & \
+ FZA_STATUS_TEST_MASK)
+
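+/* E.g. FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)) extracts the
+ * adapter state field, as used throughout the driver.
+ */
+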
+#define FZA_DLU_FAILURE 0x0 /* DLU catastrophic error; brain dead */
+#define FZA_DLU_ERROR 0x1 /* DLU error; old firmware intact */
+#define FZA_DLU_SUCCESS 0x2 /* DLU OK; new firmware loaded */
+
+#define FZA_LINK_OFF 0x0 /* link unavailable */
+#define FZA_LINK_ON 0x1 /* link available */
+
+#define FZA_STATE_RESET 0x0 /* resetting */
+#define FZA_STATE_UNINITIALIZED 0x1 /* after a reset */
+#define FZA_STATE_INITIALIZED 0x2 /* initialized */
+#define FZA_STATE_RUNNING 0x3 /* running (link active) */
+#define FZA_STATE_MAINTENANCE 0x4 /* running (link looped back) */
+#define FZA_STATE_HALTED 0x5 /* halted (error condition) */
+
+#define FZA_HALT_UNKNOWN 0x00 /* unknown reason */
+#define FZA_HALT_HOST 0x01 /* host-directed HALT */
+#define FZA_HALT_HB_PARITY 0x02 /* host bus parity error */
+#define FZA_HALT_NXM 0x03 /* adapter non-existent memory ref. */
+#define FZA_HALT_SW 0x04 /* adapter software fault */
+#define FZA_HALT_HW 0x05 /* adapter hardware fault */
+#define FZA_HALT_PC_TRACE 0x06 /* PC Trace path test */
+#define FZA_HALT_DLSW 0x07 /* data link software fault */
+#define FZA_HALT_DLHW 0x08 /* data link hardware fault */
+
+#define FZA_TEST_FATAL 0x00 /* self-test catastrophic failure */
+#define FZA_TEST_68K 0x01 /* 68000 CPU */
+#define FZA_TEST_SRAM_BWADDR 0x02 /* SRAM byte/word address */
+#define FZA_TEST_SRAM_DBUS 0x03 /* SRAM data bus */
+#define FZA_TEST_SRAM_STUCK1 0x04 /* SRAM stuck-at range 1 */
+#define FZA_TEST_SRAM_STUCK2 0x05 /* SRAM stuck-at range 2 */
+#define FZA_TEST_SRAM_COUPL1 0x06 /* SRAM coupling range 1 */
+#define FZA_TEST_SRAM_COUPL2 0x07 /* SRAM coupling */
+#define FZA_TEST_FLASH_CRC 0x08 /* Flash CRC */
+#define FZA_TEST_ROM 0x09 /* option ROM */
+#define FZA_TEST_PHY_CSR 0x0a /* PHY CSR */
+#define FZA_TEST_MAC_BIST 0x0b /* MAC BiST */
+#define FZA_TEST_MAC_CSR 0x0c /* MAC CSR */
+#define FZA_TEST_MAC_ADDR_UNIQ 0x0d /* MAC unique address */
+#define FZA_TEST_ELM_BIST 0x0e /* ELM BiST */
+#define FZA_TEST_ELM_CSR 0x0f /* ELM CSR */
+#define FZA_TEST_ELM_ADDR_UNIQ 0x10 /* ELM unique address */
+#define FZA_TEST_CAM 0x11 /* CAM */
+#define FZA_TEST_NIROM 0x12 /* NI ROM checksum */
+#define FZA_TEST_SC_LOOP 0x13 /* SC loopback packet */
+#define FZA_TEST_LM_LOOP 0x14 /* LM loopback packet */
+#define FZA_TEST_EB_LOOP 0x15 /* EB loopback packet */
+#define FZA_TEST_SC_LOOP_BYPS 0x16 /* SC bypass loopback packet */
+#define FZA_TEST_LM_LOOP_LOCAL 0x17 /* LM local loopback packet */
+#define FZA_TEST_EB_LOOP_LOCAL 0x18 /* EB local loopback packet */
+#define FZA_TEST_CDC_LOOP 0x19 /* CDC loopback packet */
+#define FZA_TEST_FIBER_LOOP 0x1a /* FIBER loopback packet */
+#define FZA_TEST_CAM_MATCH_LOOP 0x1b /* CAM match packet loopback */
+#define FZA_TEST_68K_IRQ_STUCK 0x1c /* 68000 interrupt line stuck-at */
+#define FZA_TEST_IRQ_PRESENT 0x1d /* interrupt present register */
+#define FZA_TEST_RMC_BIST 0x1e /* RMC BiST */
+#define FZA_TEST_RMC_CSR 0x1f /* RMC CSR */
+#define FZA_TEST_RMC_ADDR_UNIQ 0x20 /* RMC unique address */
+#define FZA_TEST_PM_DPATH 0x21 /* packet memory data path */
+#define FZA_TEST_PM_ADDR 0x22 /* packet memory address */
+#define FZA_TEST_RES_23 0x23 /* reserved */
+#define FZA_TEST_PM_DESC 0x24 /* packet memory descriptor */
+#define FZA_TEST_PM_OWN 0x25 /* packet memory own bit */
+#define FZA_TEST_PM_PARITY 0x26 /* packet memory parity */
+#define FZA_TEST_PM_BSWAP 0x27 /* packet memory byte swap */
+#define FZA_TEST_PM_WSWAP 0x28 /* packet memory word swap */
+#define FZA_TEST_PM_REF 0x29 /* packet memory refresh */
+#define FZA_TEST_PM_CSR 0x2a /* PM CSR */
+#define FZA_TEST_PORT_STATUS 0x2b /* port status register */
+#define FZA_TEST_HOST_IRQMASK 0x2c /* host interrupt mask */
+#define FZA_TEST_TIMER_IRQ1 0x2d /* RTOS timer */
+#define FZA_TEST_FORCE_IRQ1 0x2e /* force RTOS IRQ1 */
+#define FZA_TEST_TIMER_IRQ5 0x2f /* IRQ5 backoff timer */
+#define FZA_TEST_FORCE_IRQ5 0x30 /* force IRQ5 */
+#define FZA_TEST_RES_31 0x31 /* reserved */
+#define FZA_TEST_IC_PRIO 0x32 /* interrupt controller priority */
+#define FZA_TEST_PM_FULL 0x33 /* full packet memory */
+#define FZA_TEST_PMI_DMA 0x34 /* PMI DMA */
+
+/* Interrupt mask register constants. All bits are r/w. */
+#define FZA_MASK_RESERVED 0xf000 /* unused */
+#define FZA_MASK_DLU_DONE 0x0800 /* flash memory write complete */
+#define FZA_MASK_FLUSH_TX 0x0400 /* transmit ring flush request */
+#define FZA_MASK_PM_PARITY_ERR 0x0200 /* onboard packet memory parity error
+ */
+#define FZA_MASK_HB_PARITY_ERR 0x0100 /* host bus parity error */
+#define FZA_MASK_NXM_ERR 0x0080 /* adapter non-existent memory
+ * reference
+ */
+#define FZA_MASK_LINK_ST_CHG 0x0040 /* link status change */
+#define FZA_MASK_STATE_CHG 0x0020 /* adapter state change */
+#define FZA_MASK_UNS_POLL 0x0010 /* unsolicited event service request */
+#define FZA_MASK_CMD_DONE 0x0008 /* command ring entry processed */
+#define FZA_MASK_SMT_TX_POLL 0x0004 /* SMT frame transmit request */
+#define FZA_MASK_RCV_POLL 0x0002 /* receive request (packet available)
+ */
+#define FZA_MASK_TX_DONE 0x0001 /* RMC transmit done acknowledge */
+
+/* Which interrupts to receive: 0/1 is mask/unmask. */
+#define FZA_MASK_NONE 0x0000
+#define FZA_MASK_NORMAL \
+ ((~(FZA_MASK_RESERVED | FZA_MASK_DLU_DONE | \
+ FZA_MASK_PM_PARITY_ERR | FZA_MASK_HB_PARITY_ERR | \
+ FZA_MASK_NXM_ERR)) & 0xffff)
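For reference, the complement works out as follows under the bit assignments above (a worked check, not new semantics):

	~(0xf000 | 0x0800 | 0x0200 | 0x0100 | 0x0080) & 0xffff
		= ~0xfb80 & 0xffff
		= 0x047f

so FZA_MASK_NORMAL enables FLUSH_TX, LINK_ST_CHG, STATE_CHG, UNS_POLL, CMD_DONE, SMT_TX_POLL, RCV_POLL and TX_DONE, while the reserved, DLU-done and error sources stay masked.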
+
+/* Control A register constants. */
+#define FZA_CONTROL_A_HB_PARITY_ERR 0x8000 /* host bus parity error */
+#define FZA_CONTROL_A_NXM_ERR 0x4000 /* adapter non-existent memory
+ * reference
+ */
+#define FZA_CONTROL_A_SMT_RX_OVFL 0x0040 /* SMT receive overflow */
+#define FZA_CONTROL_A_FLUSH_DONE 0x0020 /* flush tx request complete */
+#define FZA_CONTROL_A_SHUT 0x0010 /* turn the interface off */
+#define FZA_CONTROL_A_HALT 0x0008 /* halt the controller */
+#define FZA_CONTROL_A_CMD_POLL 0x0004 /* command ring poll */
+#define FZA_CONTROL_A_SMT_RX_POLL 0x0002 /* SMT receive ring poll */
+#define FZA_CONTROL_A_TX_POLL 0x0001 /* transmit poll */
+
+/* Control B register constants. All bits are r/w.
+ *
+ * Possible values:
+ * 0x0000 after booting into REX,
+ * 0x0003 after issuing `boot #/mop'.
+ */
+#define FZA_CONTROL_B_CONSOLE 0x0002 /* OR with DRIVER for console
+ * (TC firmware) mode
+ */
+#define FZA_CONTROL_B_DRIVER 0x0001 /* driver mode */
+#define FZA_CONTROL_B_IDLE 0x0000 /* no driver installed */
+
+#define FZA_RESET_PAD \
+ (FZA_REG_RESET - FZA_REG_BASE)
+#define FZA_INT_EVENT_PAD \
+ (FZA_REG_INT_EVENT - FZA_REG_RESET - sizeof(u16))
+#define FZA_CONTROL_A_PAD \
+ (FZA_REG_CONTROL_A - FZA_REG_INT_MASK - sizeof(u16))
+
+/* Layout of registers. */
+struct fza_regs {
+ u8 pad0[FZA_RESET_PAD];
+ u16 reset; /* reset register */
+ u8 pad1[FZA_INT_EVENT_PAD];
+ u16 int_event; /* interrupt event register */
+ u16 status; /* status register */
+ u16 int_mask; /* interrupt mask register */
+ u8 pad2[FZA_CONTROL_A_PAD];
+ u16 control_a; /* control A register */
+ u16 control_b; /* control B register */
+};
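The pad arrays exist solely to pin each register to its documented bus offset. A hedged compile-time sketch of that intent, assuming the FZA_REG_* offsets defined earlier in this header (the function would be called once, e.g. from probe):

static inline void fza_regs_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct fza_regs, reset) !=
		     FZA_REG_RESET - FZA_REG_BASE);
	BUILD_BUG_ON(offsetof(struct fza_regs, int_event) !=
		     FZA_REG_INT_EVENT - FZA_REG_BASE);
	BUILD_BUG_ON(offsetof(struct fza_regs, control_a) !=
		     FZA_REG_CONTROL_A - FZA_REG_BASE);
}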
+
+/* Command descriptor ring entry. */
+struct fza_ring_cmd {
+ u32 cmd_own; /* bit 31: ownership, bits [30:0]: command */
+ u32 stat; /* command status */
+ u32 buffer; /* address of the buffer in the FZA space */
+ u32 pad0;
+};
+
+#define FZA_RING_CMD 0x200400 /* command ring address */
+#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
+ * size
+ */
+/* Command constants. */
+#define FZA_RING_CMD_MASK 0x7fffffff
+#define FZA_RING_CMD_NOP 0x00000000 /* nop */
+#define FZA_RING_CMD_INIT 0x00000001 /* initialize */
+#define FZA_RING_CMD_MODCAM 0x00000002 /* modify CAM */
+#define FZA_RING_CMD_PARAM 0x00000003 /* set system parameters */
+#define FZA_RING_CMD_MODPROM 0x00000004 /* modify promiscuous mode */
+#define FZA_RING_CMD_SETCHAR 0x00000005 /* set link characteristics */
+#define FZA_RING_CMD_RDCNTR 0x00000006 /* read counters */
+#define FZA_RING_CMD_STATUS 0x00000007 /* get link status */
+#define FZA_RING_CMD_RDCAM 0x00000008 /* read CAM */
+
+/* Command status constants. */
+#define FZA_RING_STAT_SUCCESS 0x00000000
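The handshake implied by cmd_own: the host fills an entry, clears bit 31 to hand it to the adapter (see the ownership constants further below), and pokes the command poll bit in control A; the adapter sets bit 31 again on completion. A sketch under those assumptions, using the struct fza_private bookkeeping declared near the end of this header; FZA_RING_CMD_SIZE is read here as an entry count, since 0x40 entries of 16 bytes fill 0x200400..0x2007ff, right up to the unsolicited ring at 0x200800.

static void fza_cmd_issue(struct fza_private *fp, u32 command)
{
	struct fza_ring_cmd __iomem *ring =
		&fp->ring_cmd[fp->ring_cmd_index];

	/* Bit 31 clear hands the entry to the adapter. */
	writel(command & FZA_RING_CMD_MASK, &ring->cmd_own);
	/* Ask the adapter to scan the command ring. */
	writew(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);

	fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;
}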
+
+/* Unsolicited event descriptor ring entry. */
+struct fza_ring_uns {
+ u32 own; /* bit 31: ownership, bits [30:0]: reserved */
+ u32 id; /* event ID */
+ u32 buffer; /* address of the buffer in the FZA space */
+ u32 pad0; /* reserved */
+};
+
+#define FZA_RING_UNS 0x200800 /* unsolicited ring address */
+#define FZA_RING_UNS_SIZE 0x40 /* unsolicited descriptor ring
+ * size
+ */
+/* Unsolicited event constants. */
+#define FZA_RING_UNS_UND 0x00000000 /* undefined event ID */
+#define FZA_RING_UNS_INIT_IN 0x00000001 /* ring init initiated */
+#define FZA_RING_UNS_INIT_RX 0x00000002 /* ring init received */
+#define FZA_RING_UNS_BEAC_IN 0x00000003 /* ring beaconing initiated */
+#define FZA_RING_UNS_DUP_ADDR 0x00000004 /* duplicate address detected */
+#define FZA_RING_UNS_DUP_TOK 0x00000005 /* duplicate token detected */
+#define FZA_RING_UNS_PURG_ERR 0x00000006 /* ring purger error */
+#define FZA_RING_UNS_STRIP_ERR 0x00000007 /* bridge strip error */
+#define FZA_RING_UNS_OP_OSC 0x00000008 /* ring op oscillation */
+#define FZA_RING_UNS_BEAC_RX 0x00000009 /* directed beacon received */
+#define FZA_RING_UNS_PCT_IN 0x0000000a /* PC trace initiated */
+#define FZA_RING_UNS_PCT_RX 0x0000000b /* PC trace received */
+#define FZA_RING_UNS_TX_UNDER 0x0000000c /* transmit underrun */
+#define FZA_RING_UNS_TX_FAIL 0x0000000d /* transmit failure */
+#define FZA_RING_UNS_RX_OVER 0x0000000e /* receive overrun */
+
+/* RMC (Ring Memory Control) transmit descriptor ring entry. */
+struct fza_ring_rmc_tx {
+ u32 rmc; /* RMC information */
+ u32 avl; /* available for host (unused by RMC) */
+ u32 own; /* bit 31: ownership, bits [30:0]: reserved */
+ u32 pad0; /* reserved */
+};
+
+#define FZA_TX_BUFFER_ADDR(x) (0x200000 | (((x) & 0xffff) << 5))
+#define FZA_TX_BUFFER_SIZE 512
+struct fza_buffer_tx {
+ u32 data[FZA_TX_BUFFER_SIZE / sizeof(u32)];
+};
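Since the macro shifts the index left by 5, transmit buffer addresses step through the 0x200000 packet-memory window in 32-byte units, so a full 512-byte fza_buffer_tx spans 16 index units. Worked examples:

	FZA_TX_BUFFER_ADDR(0)  == 0x200000
	FZA_TX_BUFFER_ADDR(1)  == 0x200020
	FZA_TX_BUFFER_ADDR(16) == 0x200200	/* one whole buffer later */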
+
+/* Transmit ring RMC constants. */
+#define FZA_RING_TX_SOP 0x80000000 /* start of packet */
+#define FZA_RING_TX_EOP 0x40000000 /* end of packet */
+#define FZA_RING_TX_DTP 0x20000000 /* discard this packet */
+#define FZA_RING_TX_VBC 0x10000000 /* valid buffer byte count */
+#define FZA_RING_TX_DCC_MASK 0x0f000000 /* DMA completion code */
+#define FZA_RING_TX_DCC_SUCCESS 0x01000000 /* transmit succeeded */
+#define FZA_RING_TX_DCC_DTP_SOP 0x02000000 /* DTP set at SOP */
+#define FZA_RING_TX_DCC_DTP 0x04000000 /* DTP set within packet */
+#define FZA_RING_TX_DCC_ABORT 0x05000000 /* MAC-requested abort */
+#define FZA_RING_TX_DCC_PARITY 0x06000000 /* xmit data parity error */
+#define FZA_RING_TX_DCC_UNDRRUN 0x07000000 /* transmit underrun */
+#define FZA_RING_TX_XPO_MASK 0x003fe000 /* transmit packet offset */
+
+/* Host receive descriptor ring entry. */
+struct fza_ring_hst_rx {
+ u32 buf0_own; /* bit 31: ownership, bits [30:23]: unused,
+ * bits [22:0]: right-shifted address of the
+ * buffer in system memory (low buffer)
+ */
+ u32 buffer1; /* bits [31:23]: unused,
+ * bits [22:0]: right-shifted address of the
+ * buffer in system memory (high buffer)
+ */
+ u32 rmc; /* RMC information */
+ u32 pad0;
+};
+
+#define FZA_RX_BUFFER_SIZE (4096 + 512) /* buffer length */
+
+/* Receive ring RMC constants. */
+#define FZA_RING_RX_SOP 0x80000000 /* start of packet */
+#define FZA_RING_RX_EOP 0x40000000 /* end of packet */
+#define FZA_RING_RX_FSC_MASK 0x38000000 /* # of frame status bits */
+#define FZA_RING_RX_FSB_MASK 0x07c00000 /* frame status bits */
+#define FZA_RING_RX_FSB_ERR 0x04000000 /* error detected */
+#define FZA_RING_RX_FSB_ADDR 0x02000000 /* address recognized */
+#define FZA_RING_RX_FSB_COP 0x01000000 /* frame copied */
+#define FZA_RING_RX_FSB_F0 0x00800000 /* first additional flag */
+#define FZA_RING_RX_FSB_F1 0x00400000 /* second additional flag */
+#define FZA_RING_RX_BAD 0x00200000 /* bad packet */
+#define FZA_RING_RX_CRC 0x00100000 /* CRC error */
+#define FZA_RING_RX_RRR_MASK 0x000e0000 /* MAC receive status bits */
+#define FZA_RING_RX_RRR_OK 0x00000000 /* receive OK */
+#define FZA_RING_RX_RRR_SADDR 0x00020000 /* source address matched */
+#define FZA_RING_RX_RRR_DADDR 0x00040000 /* dest address not matched */
+#define FZA_RING_RX_RRR_ABORT 0x00060000 /* RMC abort */
+#define FZA_RING_RX_RRR_LENGTH 0x00080000 /* invalid length */
+#define FZA_RING_RX_RRR_FRAG 0x000a0000 /* fragment */
+#define FZA_RING_RX_RRR_FORMAT 0x000c0000 /* format error */
+#define FZA_RING_RX_RRR_RESET 0x000e0000 /* MAC reset */
+#define FZA_RING_RX_DA_MASK 0x00018000 /* daddr match status bits */
+#define FZA_RING_RX_DA_NONE 0x00000000 /* no match */
+#define FZA_RING_RX_DA_PROM 0x00008000 /* promiscuous match */
+#define FZA_RING_RX_DA_CAM 0x00010000 /* CAM entry match */
+#define FZA_RING_RX_DA_LOCAL 0x00018000 /* link addr or LLC bcast */
+#define FZA_RING_RX_SA_MASK 0x00006000 /* saddr match status bits */
+#define FZA_RING_RX_SA_NONE 0x00000000 /* no match */
+#define FZA_RING_RX_SA_ALIAS 0x00002000 /* alias address match */
+#define FZA_RING_RX_SA_CAM 0x00004000 /* CAM entry match */
+#define FZA_RING_RX_SA_LOCAL 0x00006000 /* link address match */
+
+/* SMT (Station Management) transmit/receive descriptor ring entry. */
+struct fza_ring_smt {
+ u32 own; /* bit 31: ownership, bits [30:0]: unused */
+ u32 rmc; /* RMC information */
+ u32 buffer; /* address of the buffer */
+ u32 pad0; /* reserved */
+};
+
+/* Ownership constants.
+ *
+ * Only an owner is permitted to process a given ring entry.
+ * The bit sense is reversed for the RMC transmit ring.
+ */
+#define FZA_RING_OWN_MASK 0x80000000
+#define FZA_RING_OWN_FZA 0x00000000 /* permit FZA, forbid host */
+#define FZA_RING_OWN_HOST 0x80000000 /* permit host, forbid FZA */
+#define FZA_RING_TX_OWN_RMC 0x80000000 /* permit RMC, forbid host */
+#define FZA_RING_TX_OWN_HOST 0x00000000 /* permit host, forbid RMC */
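Because the bit sense is inverted on the RMC transmit ring, a receive entry is ready for the host when bit 31 is set, while a transmit entry is back in host hands when it is clear. A hedged sketch of the two tests against the descriptor structures above:

static inline bool fza_rx_ready(struct fza_ring_hst_rx __iomem *rx)
{
	/* Set by the adapter once a packet has been delivered. */
	return (readl(&rx->buf0_own) & FZA_RING_OWN_MASK) ==
	       FZA_RING_OWN_HOST;
}

static inline bool fza_tx_done(struct fza_ring_rmc_tx __iomem *tx)
{
	/* Reversed polarity: clear means the host owns the entry. */
	return (readl(&tx->own) & FZA_RING_OWN_MASK) ==
	       FZA_RING_TX_OWN_HOST;
}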
+
+/* RMC constants. */
+#define FZA_RING_PBC_MASK 0x00001fff /* frame length */
+
+/* Layout of counter buffers. */
+
+struct fza_counter {
+ u32 msw;
+ u32 lsw;
+};
+
+struct fza_counters {
+ struct fza_counter sys_buf; /* system buffer unavailable */
+ struct fza_counter tx_under; /* transmit underruns */
+ struct fza_counter tx_fail; /* transmit failures */
+ struct fza_counter rx_over; /* receive data overruns */
+ struct fza_counter frame_cnt; /* frame count */
+ struct fza_counter error_cnt; /* error count */
+ struct fza_counter lost_cnt; /* lost count */
+ struct fza_counter rinit_in; /* ring initialization initiated */
+ struct fza_counter rinit_rx; /* ring initialization received */
+ struct fza_counter beac_in; /* ring beacon initiated */
+ struct fza_counter dup_addr; /* duplicate address test failures */
+ struct fza_counter dup_tok; /* duplicate token detected */
+ struct fza_counter purg_err; /* ring purge errors */
+ struct fza_counter strip_err; /* bridge strip errors */
+ struct fza_counter pct_in; /* traces initiated */
+ struct fza_counter pct_rx; /* traces received */
+ struct fza_counter lem_rej; /* LEM rejects */
+ struct fza_counter tne_rej; /* TNE expiry rejects */
+ struct fza_counter lem_event; /* LEM events */
+ struct fza_counter lct_rej; /* LCT rejects */
+ struct fza_counter conn_cmpl; /* connections completed */
+ struct fza_counter el_buf; /* elasticity buffer errors */
+};
+
+/* Layout of command buffers. */
+
+/* INIT command buffer.
+ *
+ * Values of default link parameters given are as obtained from a
+ * DEFZA-AA rev. C03 board. The board counts time in units of 80ns.
+ */
+struct fza_cmd_init {
+ u32 tx_mode; /* transmit mode */
+ u32 hst_rx_size; /* host receive ring entries */
+
+ struct fza_counters counters; /* counters */
+
+ u8 rmc_rev[4]; /* RMC revision */
+ u8 rom_rev[4]; /* ROM revision */
+ u8 fw_rev[4]; /* firmware revision */
+
+ u32 mop_type; /* MOP device type */
+
+ u32 hst_rx; /* base of host rx descriptor ring */
+ u32 rmc_tx; /* base of RMC tx descriptor ring */
+ u32 rmc_tx_size; /* size of RMC tx descriptor ring */
+ u32 smt_tx; /* base of SMT tx descriptor ring */
+ u32 smt_tx_size; /* size of SMT tx descriptor ring */
+ u32 smt_rx; /* base of SMT rx descriptor ring */
+ u32 smt_rx_size; /* size of SMT rx descriptor ring */
+
+ u32 hw_addr[2]; /* link address */
+
+ u32 def_t_req; /* default Requested TTRT (T_REQ) --
+ * C03: 100000 [80ns]
+ */
+ u32 def_tvx; /* default Valid Transmission Time
+ * (TVX) -- C03: 32768 [80ns]
+ */
+ u32 def_t_max; /* default Maximum TTRT (T_MAX) --
+ * C03: 2162688 [80ns]
+ */
+ u32 lem_threshold; /* default LEM threshold -- C03: 8 */
+ u32 def_station_id[2]; /* default station ID */
+
+ u32 pmd_type_alt; /* alternative PMD type code */
+
+ u32 smt_ver; /* SMT version */
+
+ u32 rtoken_timeout; /* default restricted token timeout
+ * -- C03: 12500000 [80ns]
+ */
+ u32 ring_purger; /* default ring purger enable --
+ * C03: 1
+ */
+
+ u32 smt_ver_max; /* max SMT version ID */
+ u32 smt_ver_min; /* min SMT version ID */
+ u32 pmd_type; /* PMD type code */
+};
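For orientation, the C03 defaults quoted in the comments above convert from 80 ns units to familiar magnitudes (a worked check, not data from the patch):

	T_REQ:          100000   * 80 ns =    8 ms
	TVX:            32768    * 80 ns = ~2.6 ms
	T_MAX:          2162688  * 80 ns = ~173 ms
	rtoken_timeout: 12500000 * 80 ns =    1 s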
+
+/* INIT command PMD type codes. */
+#define FZA_PMD_TYPE_MMF 0 /* Multimode fiber */
+#define FZA_PMD_TYPE_TW 101 /* ThinWire */
+#define FZA_PMD_TYPE_STP 102 /* STP */
+
+/* MODCAM/RDCAM command buffer. */
+#define FZA_CMD_CAM_SIZE 64 /* CAM address entry count */
+struct fza_cmd_cam {
+ u32 hw_addr[FZA_CMD_CAM_SIZE][2]; /* CAM address entries */
+};
+
+/* PARAM command buffer.
+ *
+ * Permitted ranges given are as defined by the spec and obtained from a
+ * DEFZA-AA rev. C03 board, respectively. The rtoken_timeout field is
+ * erroneously interpreted in units of ms.
+ */
+struct fza_cmd_param {
+ u32 loop_mode; /* loopback mode */
+ u32 t_max; /* Maximum TTRT (T_MAX)
+ * def: ??? [80ns]
+ * C03: [t_req+1,4294967295] [80ns]
+ */
+ u32 t_req; /* Requested TTRT (T_REQ)
+ * def: [50000,2097151] [80ns]
+ * C03: [50001,t_max-1] [80ns]
+ */
+ u32 tvx; /* Valid Transmission Time (TVX)
+ * def: [29375,65280] [80ns]
+ * C03: [29376,65279] [80ns]
+ */
+ u32 lem_threshold; /* LEM threshold */
+ u32 station_id[2]; /* station ID */
+ u32 rtoken_timeout; /* restricted token timeout
+ * def: [0,125000000] [80ns]
+ * C03: [0,9999] [ms]
+ */
+ u32 ring_purger; /* ring purger enable: 0|1 */
+};
+
+/* Loopback modes for the PARAM command. */
+#define FZA_LOOP_NORMAL 0
+#define FZA_LOOP_INTERN 1
+#define FZA_LOOP_EXTERN 2
+
+/* MODPROM command buffer. */
+struct fza_cmd_modprom {
+ u32 llc_prom; /* LLC promiscuous enable */
+ u32 smt_prom; /* SMT promiscuous enable */
+ u32 llc_multi; /* LLC multicast promiscuous enable */
+ u32 llc_bcast; /* LLC broadcast promiscuous enable */
+};
+
+/* SETCHAR command buffer.
+ *
+ * Permitted ranges are as for the PARAM command.
+ */
+struct fza_cmd_setchar {
+ u32 t_max; /* Maximum TTRT (T_MAX) */
+ u32 t_req; /* Requested TTRT (T_REQ) */
+ u32 tvx; /* Valid Transmission Time (TVX) */
+ u32 lem_threshold; /* LEM threshold */
+ u32 rtoken_timeout; /* restricted token timeout */
+ u32 ring_purger; /* ring purger enable */
+};
+
+/* RDCNTR command buffer. */
+struct fza_cmd_rdcntr {
+ struct fza_counters counters; /* counters */
+};
+
+/* STATUS command buffer. */
+struct fza_cmd_status {
+ u32 led_state; /* LED state */
+ u32 rmt_state; /* ring management state */
+ u32 link_state; /* link state */
+ u32 dup_addr; /* duplicate address flag */
+ u32 ring_purger; /* ring purger state */
+ u32 t_neg; /* negotiated TTRT [80ns] */
+ u32 una[2]; /* upstream neighbour address */
+ u32 una_timeout; /* UNA timed out */
+ u32 strip_mode; /* frame strip mode */
+ u32 yield_mode; /* claim token yield mode */
+ u32 phy_state; /* PHY state */
+ u32 neigh_phy; /* neighbour PHY type */
+ u32 reject; /* reject reason */
+ u32 phy_lee; /* PHY link error estimate [-log10] */
+ u32 una_old[2]; /* old upstream neighbour address */
+ u32 rmt_mac; /* remote MAC indicated */
+ u32 ring_err; /* ring error reason */
+ u32 beac_rx[2]; /* sender of last directed beacon */
+ u32 un_dup_addr; /* upstream neighbour dup address flag */
+ u32 dna[2]; /* downstream neighbour address */
+ u32 dna_old[2]; /* old downstream neighbour address */
+};
+
+/* Common command buffer. */
+union fza_cmd_buf {
+ struct fza_cmd_init init;
+ struct fza_cmd_cam cam;
+ struct fza_cmd_param param;
+ struct fza_cmd_modprom modprom;
+ struct fza_cmd_setchar setchar;
+ struct fza_cmd_rdcntr rdcntr;
+ struct fza_cmd_status status;
+};
+
+/* MAC (Media Access Controller) chip packet request header constants. */
+
+/* Packet request header byte #0. */
+#define FZA_PRH0_FMT_TYPE_MASK 0xc0 /* type of packet, always zero */
+#define FZA_PRH0_TKN_TYPE_MASK 0x30 /* type of token required
+ * to send this frame
+ */
+#define FZA_PRH0_TKN_TYPE_ANY 0x30 /* use either token type */
+#define FZA_PRH0_TKN_TYPE_UNR 0x20 /* use an unrestricted token */
+#define FZA_PRH0_TKN_TYPE_RST 0x10 /* use a restricted token */
+#define FZA_PRH0_TKN_TYPE_IMM 0x00 /* send immediately, no token required
+ */
+#define FZA_PRH0_FRAME_MASK 0x08 /* type of frame to send */
+#define FZA_PRH0_FRAME_SYNC 0x08 /* send a synchronous frame */
+#define FZA_PRH0_FRAME_ASYNC 0x00 /* send an asynchronous frame */
+#define FZA_PRH0_MODE_MASK 0x04 /* send mode */
+#define FZA_PRH0_MODE_IMMED 0x04 /* an immediate mode, send regardless
+ * of the ring operational state
+ */
+#define FZA_PRH0_MODE_NORMAL 0x00 /* a normal mode, send only if ring
+ * operational
+ */
+#define FZA_PRH0_SF_MASK 0x02 /* send frame first */
+#define FZA_PRH0_SF_FIRST 0x02 /* send this frame first
+ * with this token capture
+ */
+#define FZA_PRH0_SF_NORMAL 0x00 /* treat this frame normally */
+#define FZA_PRH0_BCN_MASK 0x01 /* beacon frame */
+#define FZA_PRH0_BCN_BEACON 0x01 /* send the frame only
+ * if in the beacon state
+ */
+#define FZA_PRH0_BCN_DATA 0x00 /* send the frame only
+ * if in the data state
+ */
+/* Packet request header byte #1. */
+ /* bit 7 always zero */
+#define FZA_PRH1_SL_MASK 0x40 /* send frame last */
+#define FZA_PRH1_SL_LAST 0x40 /* send this frame last, releasing
+ * the token afterwards
+ */
+#define FZA_PRH1_SL_NORMAL 0x00 /* treat this frame normally */
+#define FZA_PRH1_CRC_MASK 0x20 /* CRC append */
+#define FZA_PRH1_CRC_NORMAL 0x20 /* calculate the CRC and append it
+ * as the FCS field to the frame
+ */
+#define FZA_PRH1_CRC_SKIP 0x00 /* leave the frame as is */
+#define FZA_PRH1_TKN_SEND_MASK 0x18 /* type of token to send after the
+ * frame if this is the last frame
+ */
+#define FZA_PRH1_TKN_SEND_ORIG 0x18 /* send a token of the same type as the
+ * originally captured one
+ */
+#define FZA_PRH1_TKN_SEND_RST 0x10 /* send a restricted token */
+#define FZA_PRH1_TKN_SEND_UNR 0x08 /* send an unrestricted token */
+#define FZA_PRH1_TKN_SEND_NONE 0x00 /* send no token */
+#define FZA_PRH1_EXTRA_FS_MASK 0x07 /* send extra frame status indicators
+ */
+#define FZA_PRH1_EXTRA_FS_ST 0x07 /* TR RR ST II */
+#define FZA_PRH1_EXTRA_FS_SS 0x06 /* TR RR SS II */
+#define FZA_PRH1_EXTRA_FS_SR 0x05 /* TR RR SR II */
+#define FZA_PRH1_EXTRA_FS_NONE1 0x04 /* TR RR II II */
+#define FZA_PRH1_EXTRA_FS_RT 0x03 /* TR RR RT II */
+#define FZA_PRH1_EXTRA_FS_RS 0x02 /* TR RR RS II */
+#define FZA_PRH1_EXTRA_FS_RR 0x01 /* TR RR RR II */
+#define FZA_PRH1_EXTRA_FS_NONE 0x00 /* TR RR II II */
+/* Packet request header byte #2. */
+#define FZA_PRH2_NORMAL 0x00 /* always zero */
+
+/* PRH used for LLC frames. */
+#define FZA_PRH0_LLC (FZA_PRH0_TKN_TYPE_UNR)
+#define FZA_PRH1_LLC (FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR)
+#define FZA_PRH2_LLC (FZA_PRH2_NORMAL)
+
+/* PRH used for SMT frames. */
+#define FZA_PRH0_SMT (FZA_PRH0_TKN_TYPE_UNR)
+#define FZA_PRH1_SMT (FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR)
+#define FZA_PRH2_SMT (FZA_PRH2_NORMAL)
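The three PRH bytes are prepended to the frame data handed to the RMC; for ordinary LLC traffic the prepacked values above reduce to "capture an unrestricted token, append the CRC, release an unrestricted token". A minimal sketch (the header's exact placement relative to the frame follows the RMC documentation and is assumed here):

static void fza_fill_prh(u8 prh[3])
{
	prh[0] = FZA_PRH0_LLC;	/* transmit on an unrestricted token */
	prh[1] = FZA_PRH1_LLC;	/* append CRC, send unrestricted token */
	prh[2] = FZA_PRH2_LLC;	/* always zero */
}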
+
+#if ((FZA_RING_RX_SIZE) < 2) || ((FZA_RING_RX_SIZE) > 256)
+# error FZA_RING_RX_SIZE has to be from 2 up to 256
+#endif
+#if ((FZA_RING_TX_MODE) != 0) && ((FZA_RING_TX_MODE) != 1)
+# error FZA_RING_TX_MODE has to be either 0 or 1
+#endif
+
+#define FZA_RING_TX_SIZE (512 << (FZA_RING_TX_MODE))
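Expanded under the two permitted modes:

	FZA_RING_TX_MODE == 0:	FZA_RING_TX_SIZE == 512 << 0 == 512 entries
	FZA_RING_TX_MODE == 1:	FZA_RING_TX_SIZE == 512 << 1 == 1024 entries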
+
+struct fza_private {
+ struct device *bdev; /* pointer to the bus device */
+ const char *name; /* printable device name */
+ void __iomem *mmio; /* MMIO ioremap cookie */
+ struct fza_regs __iomem *regs; /* pointer to FZA registers */
+
+ struct sk_buff *rx_skbuff[FZA_RING_RX_SIZE];
+ /* all skbs assigned to the host
+ * receive descriptors
+ */
+ dma_addr_t rx_dma[FZA_RING_RX_SIZE];
+ /* their corresponding DMA addresses */
+
+ struct fza_ring_cmd __iomem *ring_cmd;
+ /* pointer to the command descriptor
+ * ring
+ */
+ int ring_cmd_index; /* index to the command descriptor ring
+ * for the next command
+ */
+ struct fza_ring_uns __iomem *ring_uns;
+ /* pointer to the unsolicited
+ * descriptor ring
+ */
+ int ring_uns_index; /* index to the unsolicited descriptor
+ * ring for the next event
+ */
+
+ struct fza_ring_rmc_tx __iomem *ring_rmc_tx;
+ /* pointer to the RMC transmit
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_rmc_tx_size; /* number of entries in the RMC
+ * transmit descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_rmc_tx_index; /* index to the RMC transmit descriptor
+ * ring for the next transmission
+ */
+ int ring_rmc_txd_index; /* index to the RMC transmit descriptor
+ * ring for the next transmit done
+ * acknowledge
+ */
+
+ struct fza_ring_hst_rx __iomem *ring_hst_rx;
+ /* pointer to the host receive
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_hst_rx_size; /* number of entries in the host
+ * receive descriptor ring (set by the
+ * INIT command)
+ */
+ int ring_hst_rx_index; /* index to the host receive descriptor
+ * ring for the next packet received
+ */
+
+ struct fza_ring_smt __iomem *ring_smt_tx;
+ /* pointer to the SMT transmit
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_smt_tx_size; /* number of entries in the SMT
+ * transmit descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_smt_tx_index; /* index to the SMT transmit descriptor
+ * ring for the next transmission
+ */
+
+ struct fza_ring_smt __iomem *ring_smt_rx;
+ /* pointer to the SMT receive
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_smt_rx_size; /* number of entries in the SMT
+ * receive descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_smt_rx_index; /* index to the SMT receive descriptor
+ * ring for the next packet received
+ */
+
+ struct fza_buffer_tx __iomem *buffer_tx;
+ /* pointer to the RMC transmit buffers
+ */
+
+ uint state; /* adapter expected state */
+
+ spinlock_t lock; /* for device & private data access */
+ uint int_mask; /* interrupt source selector */
+
+ int cmd_done_flag; /* command completion trigger */
+ wait_queue_head_t cmd_done_wait;
+
+ int state_chg_flag; /* state change trigger */
+ wait_queue_head_t state_chg_wait;
+
+ struct timer_list reset_timer; /* RESET time-out trigger */
+ int timer_state; /* RESET trigger state */
+
+ int queue_active; /* whether to enable queueing */
+
+ struct net_device_stats stats;
+
+ uint irq_count_flush_tx; /* transmit flush irqs */
+ uint irq_count_uns_poll; /* unsolicited event irqs */
+ uint irq_count_smt_tx_poll; /* SMT transmit irqs */
+ uint irq_count_rx_poll; /* host receive irqs */
+ uint irq_count_tx_done; /* transmit done irqs */
+ uint irq_count_cmd_done; /* command done irqs */
+ uint irq_count_state_chg; /* state change irqs */
+ uint irq_count_link_st_chg; /* link status change irqs */
+
+ uint t_max; /* T_MAX */
+ uint t_req; /* T_REQ */
+ uint tvx; /* TVX */
+ uint lem_threshold; /* LEM threshold */
+ uint station_id[2]; /* station ID */
+ uint rtoken_timeout; /* restricted token timeout */
+ uint ring_purger; /* ring purger enable flag */
+};
+
+struct fza_fddihdr {
+ u8 pa[2]; /* preamble */
+ u8 sd; /* starting delimiter */
+ struct fddihdr hdr;
+} __packed;
diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h
index a12f464941ed..448d66c2e372 100644
--- a/drivers/net/fddi/skfp/h/cmtdef.h
+++ b/drivers/net/fddi/skfp/h/cmtdef.h
@@ -655,14 +655,6 @@ void dump_hex(char *p, int len);
#ifndef PNMI_INIT
#define PNMI_INIT(smc) /* Nothing */
#endif
-#ifndef PNMI_GET_ID
-#define PNMI_GET_ID( smc, ndis_oid, buf, len, BytesWritten, BytesNeeded ) \
- ( 1 ? (-1) : (-1) )
-#endif
-#ifndef PNMI_SET_ID
-#define PNMI_SET_ID( smc, ndis_oid, buf, len, BytesRead, BytesNeeded, \
- set_type) ( 1 ? (-1) : (-1) )
-#endif
/*
* SMT_PANIC defines
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 9bcaf204a7d4..cf36e7ff3191 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2030,14 +2030,15 @@ static void netvsc_vf_setup(struct work_struct *w)
rtnl_unlock();
}
-/* Find netvsc by VMBus serial number.
- * The PCI hyperv controller records the serial number as the slot.
+/* Find netvsc by VF serial number.
+ * The PCI hyperv controller records the serial number as the slot kobj name.
*/
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
struct pci_dev *pdev;
+ u32 serial;
if (!parent || !dev_is_pci(parent))
return NULL; /* not a PCI device */
@@ -2048,16 +2049,22 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
return NULL;
}
+ if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
+ netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
+ pci_slot_name(pdev->slot));
+ return NULL;
+ }
+
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
if (!ndev_ctx->vf_alloc)
continue;
- if (ndev_ctx->vf_serial == pdev->slot->number)
+ if (ndev_ctx->vf_serial == serial)
return hv_get_drvdata(ndev_ctx->device_ctx);
}
netdev_notice(vf_netdev,
- "no netdev found for slot %u\n", pdev->slot->number);
+ "no netdev found for vf serial:%u\n", serial);
return NULL;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 14509a8903c6..1d73ac3309ce 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -482,16 +482,15 @@ static int phy_config_aneg(struct phy_device *phydev)
}
/**
- * phy_start_aneg_priv - start auto-negotiation for this PHY device
+ * phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
- * @sync: indicate whether we should wait for the workqueue cancelation
*
* Description: Sanitizes the settings (if we're not autonegotiating
* them), and then calls the driver's config_aneg function.
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
-static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+int phy_start_aneg(struct phy_device *phydev)
{
bool trigger = 0;
int err;
@@ -541,20 +540,6 @@ out_unlock:
return err;
}
-
-/**
- * phy_start_aneg - start auto-negotiation for this PHY device
- * @phydev: the phy_device struct
- *
- * Description: Sanitizes the settings (if we're not autonegotiating
- * them), and then calls the driver's config_aneg function.
- * If the PHYCONTROL Layer is operating, we change the state to
- * reflect the beginning of Auto-negotiation or forcing.
- */
-int phy_start_aneg(struct phy_device *phydev)
-{
- return phy_start_aneg_priv(phydev, true);
-}
EXPORT_SYMBOL(phy_start_aneg);
static int phy_poll_aneg_done(struct phy_device *phydev)
@@ -654,7 +639,7 @@ static void phy_queue_state_machine(struct phy_device *phydev,
*/
void phy_start_machine(struct phy_device *phydev)
{
- phy_queue_state_machine(phydev, 1);
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL_GPL(phy_start_machine);
@@ -941,7 +926,6 @@ void phy_state_machine(struct work_struct *work)
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
- int old_link;
mutex_lock(&phydev->lock);
@@ -1025,26 +1009,16 @@ void phy_state_machine(struct work_struct *work)
}
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are polling and link changed
- * since latest checking.
- */
- if (phy_polling_mode(phydev)) {
- old_link = phydev->link;
- err = phy_read_status(phydev);
- if (err)
- break;
+ if (!phy_polling_mode(phydev))
+ break;
- if (old_link != phydev->link)
- phydev->state = PHY_CHANGELINK;
- }
- /*
- * Failsafe: check that nobody set phydev->link=0 between two
- * poll cycles, otherwise we won't leave RUNNING state as long
- * as link remains down.
- */
- if (!phydev->link && phydev->state == PHY_RUNNING) {
- phydev->state = PHY_CHANGELINK;
- phydev_err(phydev, "no link in PHY_RUNNING\n");
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, true);
}
break;
case PHY_CHANGELINK:
@@ -1070,48 +1044,33 @@ void phy_state_machine(struct work_struct *work)
case PHY_RESUMING:
if (AUTONEG_ENABLE == phydev->autoneg) {
err = phy_aneg_done(phydev);
- if (err < 0)
+ if (err < 0) {
break;
-
- /* err > 0 if AN is done.
- * Otherwise, it's 0, and we're still waiting for AN
- */
- if (err > 0) {
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
- }
- } else {
+ } else if (!err) {
phydev->state = PHY_AN;
phydev->link_timeout = PHY_AN_TIMEOUT;
- }
- } else {
- err = phy_read_status(phydev);
- if (err)
break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
}
}
+
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, false);
+ }
break;
}
mutex_unlock(&phydev->lock);
if (needs_aneg)
- err = phy_start_aneg_priv(phydev, false);
+ err = phy_start_aneg(phydev);
else if (do_suspend)
phy_suspend(phydev);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 6e13b8832bc7..fd8bb998ae52 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
/* Give this long for the PHY to reset. */
#define T_PHY_RESET_MS 50
-static DEFINE_MUTEX(sfp_mutex);
-
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b1743420939b..060135ceaf0e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -562,12 +562,11 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
e->rps_rxhash = hash;
}
-/* We try to identify a flow through its rxhash first. The reason that
+/* We try to identify a flow through its rxhash. The reason that
* we do not check rxq no. is because some cards(e.g 82599), chooses
* the rxq based on the txq where the last packet of the flow comes. As
* the userspace application move between processors, we may get a
- * different rxq no. here. If we could not get rxhash, then we would
- * hope the rxq no. may help here.
+ * different rxq no. here.
*/
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
@@ -578,18 +577,13 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
- if (txq) {
- e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
- if (e) {
- tun_flow_save_rps_rxhash(e, txq);
- txq = e->queue_index;
- } else
- /* use multiply and shift instead of expensive divide */
- txq = ((u64)txq * numqueues) >> 32;
- } else if (likely(skb_rx_queue_recorded(skb))) {
- txq = skb_get_rx_queue(skb);
- while (unlikely(txq >= numqueues))
- txq -= numqueues;
+ e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
+ if (e) {
+ tun_flow_save_rps_rxhash(e, txq);
+ txq = e->queue_index;
+ } else {
+ /* use multiply and shift instead of expensive divide */
+ txq = ((u64)txq * numqueues) >> 32;
}
return txq;
@@ -1047,16 +1041,13 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
+ struct tun_flow_entry *e;
__u32 rxhash;
rxhash = __skb_get_hash_symmetric(skb);
- if (rxhash) {
- struct tun_flow_entry *e;
- e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
- rxhash);
- if (e)
- tun_flow_save_rps_rxhash(e, rxhash);
- }
+ e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
+ if (e)
+ tun_flow_save_rps_rxhash(e, rxhash);
}
#endif
}
@@ -2298,6 +2289,8 @@ static void tun_setup(struct net_device *dev)
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ if (!data)
+ return 0;
return -EINVAL;
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1eaec648bd1f..50c05d0f44cb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -779,8 +779,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
- ctx->bh.data = (unsigned long)dev;
- ctx->bh.func = cdc_ncm_txpath_bh;
+ tasklet_init(&ctx->bh, cdc_ncm_txpath_bh, (unsigned long)dev);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
@@ -1601,11 +1600,8 @@ cdc_ncm_speed_change(struct usbnet *dev,
static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
{
- struct cdc_ncm_ctx *ctx;
struct usb_cdc_notification *event;
- ctx = (struct cdc_ncm_ctx *)dev->data[0];
-
if (urb->actual_length < sizeof(*event))
return;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 533b6fb8d923..72a55b6b4211 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 224c56a4e2b1..890fa5b905e2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -37,11 +37,19 @@
#define VETH_XDP_TX BIT(0)
#define VETH_XDP_REDIR BIT(1)
+struct veth_rq_stats {
+ u64 xdp_packets;
+ u64 xdp_bytes;
+ u64 xdp_drops;
+ struct u64_stats_sync syncp;
+};
+
struct veth_rq {
struct napi_struct xdp_napi;
struct net_device *dev;
struct bpf_prog __rcu *xdp_prog;
struct xdp_mem_info xdp_mem;
+ struct veth_rq_stats stats;
bool rx_notify_masked;
struct ptr_ring xdp_ring;
struct xdp_rxq_info xdp_rxq;
@@ -59,6 +67,21 @@ struct veth_priv {
* ethtool interface
*/
+struct veth_q_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ size_t offset;
+};
+
+#define VETH_RQ_STAT(m) offsetof(struct veth_rq_stats, m)
+
+static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
+ { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
+ { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
+ { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
+};
+
+#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
+
static struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
@@ -83,9 +106,20 @@ static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
+ char *p = (char *)buf;
+ int i, j;
+
switch(stringset) {
case ETH_SS_STATS:
- memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ p += sizeof(ethtool_stats_keys);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+ i, veth_rq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
@@ -94,7 +128,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
+ return ARRAY_SIZE(ethtool_stats_keys) +
+ VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
default:
return -EOPNOTSUPP;
}
@@ -105,8 +140,25 @@ static void veth_get_ethtool_stats(struct net_device *dev,
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
+ int i, j, idx;
data[0] = peer ? peer->ifindex : 0;
+ idx = 1;
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
+ const void *stats_base = (void *)rq_stats;
+ unsigned int start;
+ size_t offset;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+ offset = veth_rq_stats_desc[j].offset;
+ data[idx + j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ idx += VETH_RQ_STATS_LEN;
+ }
}
static int veth_get_ts_info(struct net_device *dev,
@@ -211,12 +263,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
+ if (!rcv_xdp) {
+ struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += length;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ stats->bytes += length;
+ stats->packets++;
+ u64_stats_update_end(&stats->syncp);
+ }
} else {
drop:
atomic64_inc(&priv->dropped);
@@ -230,7 +284,7 @@ drop:
return NETDEV_TX_OK;
}
-static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int cpu;
@@ -253,23 +307,58 @@ static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev)
return atomic64_read(&priv->dropped);
}
+static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ result->xdp_packets = 0;
+ result->xdp_bytes = 0;
+ result->xdp_drops = 0;
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct veth_rq_stats *stats = &priv->rq[i].stats;
+ u64 packets, bytes, drops;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->xdp_packets;
+ bytes = stats->xdp_bytes;
+ drops = stats->xdp_drops;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ result->xdp_packets += packets;
+ result->xdp_bytes += bytes;
+ result->xdp_drops += drops;
+ }
+}
+
static void veth_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct pcpu_lstats one;
+ struct veth_rq_stats rx;
+ struct pcpu_lstats tx;
- tot->tx_dropped = veth_stats_one(&one, dev);
- tot->tx_bytes = one.bytes;
- tot->tx_packets = one.packets;
+ tot->tx_dropped = veth_stats_tx(&tx, dev);
+ tot->tx_bytes = tx.bytes;
+ tot->tx_packets = tx.packets;
+
+ veth_stats_rx(&rx, dev);
+ tot->rx_dropped = rx.xdp_drops;
+ tot->rx_bytes = rx.xdp_bytes;
+ tot->rx_packets = rx.xdp_packets;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped = veth_stats_one(&one, peer);
- tot->rx_bytes = one.bytes;
- tot->rx_packets = one.packets;
+ tot->rx_dropped += veth_stats_tx(&tx, peer);
+ tot->rx_bytes += tx.bytes;
+ tot->rx_packets += tx.packets;
+
+ veth_stats_rx(&rx, peer);
+ tot->tx_bytes += rx.xdp_bytes;
+ tot->tx_packets += rx.xdp_packets;
}
rcu_read_unlock();
}
@@ -308,16 +397,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct net_device *rcv;
+ int i, ret, drops = n;
unsigned int max_len;
struct veth_rq *rq;
- int i, drops = 0;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
+ goto drop;
+ }
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv))
- return -ENXIO;
+ if (unlikely(!rcv)) {
+ ret = -ENXIO;
+ goto drop;
+ }
rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
@@ -325,9 +418,12 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
* side. This means an XDP program is loaded on the peer and the peer
* device is up.
*/
- if (!rcu_access_pointer(rq->xdp_prog))
- return -ENXIO;
+ if (!rcu_access_pointer(rq->xdp_prog)) {
+ ret = -ENXIO;
+ goto drop;
+ }
+ drops = 0;
max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
spin_lock(&rq->xdp_ring.producer_lock);
@@ -346,7 +442,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- return n - drops;
+ if (likely(!drops))
+ return n;
+
+ ret = n - drops;
+drop:
+ atomic64_add(drops, &priv->dropped);
+
+ return ret;
}
static void veth_xdp_flush(struct net_device *dev)
@@ -595,28 +698,42 @@ xdp_xmit:
static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{
- int i, done = 0;
+ int i, done = 0, drops = 0, bytes = 0;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
+ unsigned int xdp_xmit_one = 0;
struct sk_buff *skb;
if (!ptr)
break;
if (veth_is_xdp_frame(ptr)) {
- skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
- xdp_xmit);
+ struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
+
+ bytes += frame->len;
+ skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one);
} else {
- skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
+ skb = ptr;
+ bytes += skb->len;
+ skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one);
}
+ *xdp_xmit |= xdp_xmit_one;
if (skb)
napi_gro_receive(&rq->xdp_napi, skb);
+ else if (!xdp_xmit_one)
+ drops++;
done++;
}
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.xdp_packets += done;
+ rq->stats.xdp_bytes += bytes;
+ rq->stats.xdp_drops += drops;
+ u64_stats_update_end(&rq->stats.syncp);
+
return done;
}
@@ -807,8 +924,10 @@ static int veth_alloc_queues(struct net_device *dev)
if (!priv->rq)
return -ENOMEM;
- for (i = 0; i < dev->num_rx_queues; i++)
+ for (i = 0; i < dev->num_rx_queues; i++) {
priv->rq[i].dev = dev;
+ u64_stats_init(&priv->rq[i].stats.syncp);
+ }
return 0;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index dab504ec5e50..3f5aa59c37b7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2170,6 +2170,53 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct ethtool_coalesce ec_default = {
+ .cmd = ETHTOOL_SCOALESCE,
+ .rx_max_coalesced_frames = 1,
+ };
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i, napi_weight;
+
+ if (ec->tx_max_coalesced_frames > 1)
+ return -EINVAL;
+
+ ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
+
+ /* disallow changes to fields not explicitly tested above */
+ if (memcmp(ec, &ec_default, sizeof(ec_default)))
+ return -EINVAL;
+
+ if (napi_weight ^ vi->sq[0].napi.weight) {
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ vi->sq[i].napi.weight = napi_weight;
+ }
+
+ return 0;
+}
+
+static int virtnet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct ethtool_coalesce ec_default = {
+ .cmd = ETHTOOL_GCOALESCE,
+ .rx_max_coalesced_frames = 1,
+ };
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ memcpy(ec, &ec_default, sizeof(ec_default));
+
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+
+ return 0;
+}
+
static void virtnet_init_settings(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2208,6 +2255,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = virtnet_get_link_ksettings,
.set_link_ksettings = virtnet_set_link_ksettings,
+ .set_coalesce = virtnet_set_coalesce,
+ .get_coalesce = virtnet_get_coalesce,
};
static void virtnet_freeze_down(struct virtio_device *vdev)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index fb0cdbba8d76..018406c4d944 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -697,6 +697,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
__be16 port, __be32 src_vni, __be32 vni,
__u32 ifindex, __u8 ndm_flags)
{
+ __u8 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int notify = 0;
@@ -714,8 +715,8 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
f->updated = jiffies;
notify = 1;
}
- if (f->flags != ndm_flags) {
- f->flags = ndm_flags;
+ if (f->flags != fdb_flags) {
+ f->flags = fdb_flags;
f->updated = jiffies;
notify = 1;
}
@@ -737,6 +738,9 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
return rc;
notify |= rc;
}
+
+ if (ndm_flags & NTF_USE)
+ f->used = jiffies;
} else {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
@@ -748,7 +752,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
- vni, ifindex, ndm_flags, &f);
+ vni, ifindex, fdb_flags, &f);
if (rc < 0)
return rc;
notify = 1;
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 6572a43590a8..e1ad6b9166a6 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -44,6 +44,7 @@ config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_QMI_HELPERS
---help---
This module adds support for integrated WCN3990 chip connected
to system NOC(SNOC). Currently work in progress and will not
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 44d60a61b242..66326b949ab1 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -36,7 +36,9 @@ obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
ath10k_usb-y += usb.o
obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
-ath10k_snoc-y += snoc.o
+ath10k_snoc-y += qmi.o \
+ qmi_wlfw_v01.o \
+ snoc.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 203f30992c26..da607febfd82 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -989,7 +989,7 @@ static int ath10k_download_fw(struct ath10k *ar)
data, data_len);
}
-static void ath10k_core_free_board_files(struct ath10k *ar)
+void ath10k_core_free_board_files(struct ath10k *ar)
{
if (!IS_ERR(ar->normal_mode_fw.board))
release_firmware(ar->normal_mode_fw.board);
@@ -1004,6 +1004,7 @@ static void ath10k_core_free_board_files(struct ath10k *ar)
ar->normal_mode_fw.ext_board_data = NULL;
ar->normal_mode_fw.ext_board_len = 0;
}
+EXPORT_SYMBOL(ath10k_core_free_board_files);
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
@@ -1331,6 +1332,14 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
goto out;
}
+ if (ar->id.qmi_ids_valid) {
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id);
+ goto out;
+ }
+
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
@@ -1359,7 +1368,7 @@ static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name,
return -1;
}
-static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
{
char boardname[100], fallback_boardname[100];
int ret;
@@ -1407,6 +1416,7 @@ success:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
return 0;
}
+EXPORT_SYMBOL(ath10k_core_fetch_board_file);
static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar)
{
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index c76af343db3d..042418097cf9 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -951,6 +951,7 @@ struct ath10k {
/* protected by conf_mutex */
u8 ps_state_enable;
+ bool nlo_enabled;
bool p2p;
struct {
@@ -988,6 +989,8 @@ struct ath10k {
u32 subsystem_device;
bool bmi_ids_valid;
+ bool qmi_ids_valid;
+ u32 qmi_board_id;
u8 bmi_board_id;
u8 bmi_eboard_id;
u8 bmi_chip_id;
@@ -1215,5 +1218,7 @@ void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
+void ath10k_core_free_board_files(struct ath10k *ar);
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 2c0cb6757fc6..15964b374f68 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2421,7 +2421,7 @@ static ssize_t ath10k_write_ps_state_enable(struct file *file,
if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
return -EINVAL;
- if (ps_state_enable > 1 || ps_state_enable < 0)
+ if (ps_state_enable > 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 3a6191cff2f9..5cf16d690724 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -44,6 +44,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_SNOC = 0x00100000,
+ ATH10K_DBG_QMI = 0x00200000,
ATH10K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index f2405258a6d3..ffec98f7be50 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2680,8 +2680,6 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
} else {
mcs = legacy_rate_idx;
- if (mcs < 0)
- return;
STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
@@ -2753,7 +2751,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- u8 rate = 0, rate_idx = 0, sgi;
+ u8 rate = 0, sgi;
+ s8 rate_idx = 0;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3933dd96da55..a1c2801ded10 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -164,7 +164,7 @@ static int ath10k_mac_get_rate_hw_value(int bitrate)
if (ath10k_mac_bitrate_is_cck(bitrate))
hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
- for (i = 0; i < sizeof(ath10k_rates); i++) {
+ for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
if (ath10k_rates[i].bitrate == bitrate)
return hw_value_prefix | ath10k_rates[i].hw_value;
}
@@ -4697,6 +4697,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
+ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
+ goto err_core_stop;
+ }
+ }
+
if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
ret = ath10k_wmi_adaptive_qcs(ar, true);
if (ret) {
@@ -5682,22 +5690,22 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
return;
}
- sband = ar->hw->wiphy->bands[def.chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
- bitrate = sband->bitrates[basic_rate_idx].bitrate;
+ sband = ar->hw->wiphy->bands[def.chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
- hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
- if (hw_rate_code < 0) {
- ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
- mutex_unlock(&ar->conf_mutex);
- return;
- }
+ hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
- vdev_param = ar->wmi.vdev_param->mgmt_rate;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- hw_rate_code);
- if (ret)
- ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
+ vdev_param = ar->wmi.vdev_param->mgmt_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
}
mutex_unlock(&ar->conf_mutex);
@@ -6855,9 +6863,20 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
-
- if (drop)
+ struct ath10k_vif *arvif;
+ u32 bitmap;
+
+ if (drop) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bitmap = ~(1 << WMI_MGMT_TID);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath10k_wmi_peer_flush(ar, arvif->vdev_id,
+ arvif->bssid, bitmap);
+ }
+ }
return;
+ }
mutex_lock(&ar->conf_mutex);
ath10k_mac_wait_tx_complete(ar);
@@ -8493,6 +8512,18 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+ if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
+ ar->hw->wiphy->max_sched_scan_reqs = 1;
+ ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ ar->hw->wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ ar->hw->wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ }
+
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
ar->hw->txq_data_size = sizeof(struct ath10k_txq);
@@ -8542,9 +8573,10 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
- if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
- NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT);
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
/*
* on LL hardware queues are managed entirely by the FW
@@ -8635,12 +8667,6 @@ int ath10k_mac_register(struct ath10k *ar)
}
if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
- ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
- if (ret) {
- ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
- goto err_dfs_detector_exit;
- }
-
ar->hw->wiphy->features |=
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 873dbb65439f..01b4edb00e9e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1071,10 +1071,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
- u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
int i;
@@ -1088,9 +1087,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
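+ /* cap the bounce buffer at DIAG_TRANSFER_LIMIT; it is refilled per chunk */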
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
@@ -1098,9 +1098,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- /* Copy caller's data to allocated DMA buf */
- memcpy(data_buf, data, orig_nbytes);
-
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
@@ -1113,12 +1110,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
- remaining_bytes = orig_nbytes;
- ce_data = ce_data_base;
+ remaining_bytes = nbytes;
while (remaining_bytes) {
/* FIXME: check cast */
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+ /* Copy caller's data to allocated DMA buf */
+ memcpy(data_buf, data, nbytes);
+
/* Set up to receive directly into Target(!) address */
ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
@@ -1128,7 +1127,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
nbytes, 0, 0);
if (ret != 0)
goto done;
@@ -1171,12 +1170,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
remaining_bytes -= nbytes;
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
if (data_buf) {
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
}
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
new file mode 100644
index 000000000000..56cb1831dcdf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/string.h>
+#include <net/sock.h>
+
+#include "debug.h"
+#include "snoc.h"
+
+#define ATH10K_QMI_CLIENT_ID 0x4b4e454c
+#define ATH10K_QMI_TIMEOUT 30
+
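+/*
+ * Reassign an MSA region from the HLOS VM to the modem and WLAN VMs via an
+ * SCM call so the firmware can use it; non-secure regions are additionally
+ * shared with the WLAN CE VM.
+ */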
+static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
+ struct ath10k_msa_mem_info *mem_info)
+{
+ struct qcom_scm_vmperm dst_perms[3];
+ struct ath10k *ar = qmi->ar;
+ unsigned int src_perms;
+ u32 perm_count;
+ int ret;
+
+ src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
+ dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
+ dst_perms[0].perm = QCOM_SCM_PERM_RW;
+ dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
+ dst_perms[1].perm = QCOM_SCM_PERM_RW;
+
+ if (mem_info->secure) {
+ perm_count = 2;
+ } else {
+ dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
+ dst_perms[2].perm = QCOM_SCM_PERM_RW;
+ perm_count = 3;
+ }
+
+ ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
+ &src_perms, dst_perms, perm_count);
+ if (ret < 0)
+ ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
+ struct ath10k_msa_mem_info *mem_info)
+{
+ struct qcom_scm_vmperm dst_perms;
+ struct ath10k *ar = qmi->ar;
+ unsigned int src_perms;
+ int ret;
+
+ src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);
+
+ if (!mem_info->secure)
+ src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);
+
+ dst_perms.vmid = QCOM_SCM_VMID_HLOS;
+ dst_perms.perm = QCOM_SCM_PERM_RW;
+
+ ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
+ &src_perms, &dst_perms, 1);
+ if (ret < 0)
+ ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < qmi->nr_mem_region; i++) {
+ ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
+ if (ret)
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--)
+ ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
+ return ret;
+}
+
+static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
+{
+ int i;
+
+ for (i = 0; i < qmi->nr_mem_region; i++)
+ ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
+}
+
+static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_msa_info_resp_msg_v01 resp = {};
+ struct wlfw_msa_info_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+ int i;
+
+ req.msa_addr = qmi->msa_pa;
+ req.size = qmi->msa_mem_size;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_msa_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_MSA_INFO_REQ_V01,
+ WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_msa_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
+ ath10k_err(ar, "invalid memory region length received: %d\n",
+ resp.mem_region_info_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ qmi->nr_mem_region = resp.mem_region_info_len;
+ for (i = 0; i < resp.mem_region_info_len; i++) {
+ qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
+ qmi->mem_region[i].size = resp.mem_region_info[i].size;
+ qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
+ ath10k_dbg(ar, ATH10K_DBG_QMI,
+ "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
+ i, &qmi->mem_region[i].addr,
+ qmi->mem_region[i].size,
+ qmi->mem_region[i].secure);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_msa_ready_resp_msg_v01 resp = {};
+ struct wlfw_msa_ready_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_msa_ready_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_MSA_READY_REQ_V01,
+ WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_msa_ready_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
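+/*
+ * Send the board data file to the firmware in QMI_WLFW_MAX_DATA_SIZE_V01
+ * sized segments; the final segment is marked with req->end = 1.
+ */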
+static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
+{
+ struct wlfw_bdf_download_resp_msg_v01 resp = {};
+ struct wlfw_bdf_download_req_msg_v01 *req;
+ struct ath10k *ar = qmi->ar;
+ unsigned int remaining;
+ struct qmi_txn txn;
+ const u8 *temp;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ temp = ar->normal_mode_fw.board_data;
+ remaining = ar->normal_mode_fw.board_len;
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = 0;
+ req->total_size_valid = 1;
+ req->total_size = ar->normal_mode_fw.board_len;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
+ WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "failed to download board data file: %d\n",
+ resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");
+
+ kfree(req);
+ return 0;
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
+{
+ struct wlfw_cal_report_resp_msg_v01 resp = {};
+ struct wlfw_cal_report_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int i, j = 0;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
+ if (qmi->cal_data[i].total_size &&
+ qmi->cal_data[i].data) {
+ req.meta_data[j] = qmi->cal_data[i].cal_id;
+ j++;
+ }
+ }
+ req.meta_data_len = j;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_CAL_REPORT_REQ_V01,
+ WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cal_report_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send calibration request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_wlan_mode_resp_msg_v01 resp = {};
+ struct wlfw_wlan_mode_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_wlan_mode_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_WLAN_MODE_REQ_V01,
+ WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "more request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ const char *version)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
+ struct wlfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_txn txn;
+ int ret;
+ u32 i;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_wlan_cfg_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ req->host_version_valid = 0;
+
+ req->tgt_cfg_valid = 1;
+ if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+ req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ else
+ req->tgt_cfg_len = config->num_ce_tgt_cfg;
+ for (i = 0; i < req->tgt_cfg_len; i++) {
+ req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+ req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+ req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+ req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+ req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+ req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+ else
+ req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
+ for (i = 0; i < req->svc_cfg_len; i++) {
+ req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+ req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+ req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+ }
+
+ req->shadow_reg_valid = 1;
+ if (config->num_shadow_reg_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+ req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+ else
+ req->shadow_reg_len = config->num_shadow_reg_cfg;
+
+ memcpy(req->shadow_reg, config->shadow_reg_cfg,
+ sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_WLAN_CFG_REQ_V01,
+ WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send config request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
+ kfree(req);
+ return 0;
+
+out:
+ kfree(req);
+ return ret;
+}
+
+int ath10k_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ enum wlfw_driver_mode_enum_v01 mode,
+ const char *version)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
+ mode, config);
+
+ ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
+ if (ret) {
+ ath10k_err(ar, "failed to send qmi config: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
+ if (ret) {
+ ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_qmi_wlan_disable(struct ath10k *ar)
+{
+ return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
+}
+
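+/* query chip, board, soc and firmware version info and cache it in qmi */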
+static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_cap_resp_msg_v01 *resp;
+ struct wlfw_cap_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_CAP_REQ_V01,
+ WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "capablity req rejected: %d\n", resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp->chip_info_valid) {
+ qmi->chip_info.chip_id = resp->chip_info.chip_id;
+ qmi->chip_info.chip_family = resp->chip_info.chip_family;
+ }
+
+ if (resp->board_info_valid)
+ qmi->board_info.board_id = resp->board_info.board_id;
+ else
+ qmi->board_info.board_id = 0xFF;
+
+ if (resp->soc_info_valid)
+ qmi->soc_info.soc_id = resp->soc_info.soc_id;
+
+ if (resp->fw_version_info_valid) {
+ qmi->fw_version = resp->fw_version_info.fw_version;
+ strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
+ sizeof(qmi->fw_build_timestamp));
+ }
+
+ if (resp->fw_build_id_valid)
+ strlcpy(qmi->fw_build_id, resp->fw_build_id,
+ MAX_BUILD_ID_LEN + 1);
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI,
+ "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
+ qmi->chip_info.chip_id, qmi->chip_info.chip_family,
+ qmi->board_info.board_id, qmi->soc_info.soc_id);
+ ath10k_dbg(ar, ATH10K_DBG_QMI,
+ "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
+ qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
+
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(resp);
+ return ret;
+}
+
+static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
+{
+ struct wlfw_host_cap_resp_msg_v01 resp = {};
+ struct wlfw_host_cap_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ req.daemon_support_valid = 1;
+ req.daemon_support = 0;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_HOST_CAP_REQ_V01,
+ WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send host capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capablity request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_ind_register_resp_msg_v01 resp = {};
+ struct wlfw_ind_register_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ req.client_id_valid = 1;
+ req.client_id = ATH10K_QMI_CLIENT_ID;
+ req.fw_ready_enable_valid = 1;
+ req.fw_ready_enable = 1;
+ req.msa_ready_enable_valid = 1;
+ req.msa_ready_enable = 1;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_ind_register_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_IND_REGISTER_REQ_V01,
+ WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ind_register_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send indication registed request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.fw_status_valid) {
+ if (resp.fw_status & QMI_WLFW_FW_READY_V01)
+ qmi->fw_ready = true;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
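+/*
+ * Handshake with the firmware once the wlfw service appears: register for
+ * indications, then (unless the firmware is already ready) send host
+ * capabilities, query and map the MSA regions, signal MSA ready and fetch
+ * the target capabilities.
+ */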
+static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+ int ret;
+
+ ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
+ if (ret)
+ return;
+
+ if (qmi->fw_ready) {
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
+ return;
+ }
+
+ ret = ath10k_qmi_host_cap_send_sync(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_setup_msa_permissions(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
+ if (ret)
+ goto err_setup_msa;
+
+ ret = ath10k_qmi_cap_send_sync_msg(qmi);
+ if (ret)
+ goto err_setup_msa;
+
+ return;
+
+err_setup_msa:
+ ath10k_qmi_remove_msa_permission(qmi);
+}
+
+static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+
+ ar->hif.bus = ATH10K_BUS_SNOC;
+ ar->id.qmi_ids_valid = true;
+ ar->id.qmi_board_id = qmi->board_info.board_id;
+ ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
+
+ return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
+}
+
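+/* queue a driver event from QMI callback context for the event worker */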
+static int
+ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
+ enum ath10k_qmi_driver_event_type type,
+ void *data)
+{
+ struct ath10k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+
+ ath10k_qmi_remove_msa_permission(qmi);
+ ath10k_core_free_board_files(ar);
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
+}
+
+static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
+{
+ int ret;
+
+ ret = ath10k_qmi_fetch_board_file(qmi);
+ if (ret)
+ goto out;
+
+ ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
+ if (ret)
+ goto out;
+
+ ret = ath10k_qmi_send_cal_report_req(qmi);
+
+out:
+ return;
+}
+
+static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
+
+ return 0;
+}
+
+static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
+}
+
+static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
+}
+
+static struct qmi_msg_handler qmi_msg_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = wlfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
+ .fn = ath10k_qmi_fw_ready_ind,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_MSA_READY_IND_V01,
+ .ei = wlfw_msa_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
+ .fn = ath10k_qmi_msa_ready_ind,
+ },
+ {}
+};
+
+static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ struct ath10k *ar = qmi->ar;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
+ sizeof(qmi->sq), 0);
+ if (ret) {
+ ath10k_err(ar, "failed to connect to a remote QMI service port\n");
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return ret;
+}
+
+static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath10k_qmi *qmi =
+ container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ qmi->fw_ready = false;
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static struct qmi_ops ath10k_qmi_ops = {
+ .new_server = ath10k_qmi_new_server,
+ .del_server = ath10k_qmi_del_server,
+};
+
+static void ath10k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
+ event_work);
+ struct ath10k_qmi_driver_event *event;
+ struct ath10k *ar = qmi->ar;
+
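+ /* drain the event list; the lock is dropped while each event is handled */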
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath10k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ switch (event->type) {
+ case ATH10K_QMI_EVENT_SERVER_ARRIVE:
+ ath10k_qmi_event_server_arrive(qmi);
+ break;
+ case ATH10K_QMI_EVENT_SERVER_EXIT:
+ ath10k_qmi_event_server_exit(qmi);
+ break;
+ case ATH10K_QMI_EVENT_FW_READY_IND:
+ ath10k_qmi_event_fw_ready_ind(qmi);
+ break;
+ case ATH10K_QMI_EVENT_MSA_READY_IND:
+ ath10k_qmi_event_msa_ready(qmi);
+ break;
+ default:
+ ath10k_warn(ar, "invalid event type: %d", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
+{
+ struct ath10k *ar = qmi->ar;
+ struct device *dev = ar->dev;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
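+ /*
+ * Prefer a fixed MSA carveout described by the "memory-region" DT
+ * phandle; otherwise fall back to a coherent DMA allocation of
+ * msa_size bytes.
+ */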
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+ of_node_put(node);
+
+ qmi->msa_pa = r.start;
+ qmi->msa_mem_size = resource_size(&r);
+ qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
+ MEMREMAP_WT);
+ if (IS_ERR(qmi->msa_va)) {
+ dev_err(dev, "failed to map memory region: %pa\n", &r.start);
+ return -EBUSY;
+ }
+ } else {
+ qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
+ &qmi->msa_pa, GFP_KERNEL);
+ if (!qmi->msa_va) {
+ ath10k_err(ar, "failed to allocate dma memory for msa region\n");
+ return -ENOMEM;
+ }
+ qmi->msa_mem_size = msa_size;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
+ &qmi->msa_pa,
+ qmi->msa_va);
+
+ return 0;
+}
+
+int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi;
+ int ret;
+
+ qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
+ if (!qmi)
+ return -ENOMEM;
+
+ qmi->ar = ar;
+ ar_snoc->qmi = qmi;
+
+ ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
+ if (ret)
+ goto err;
+
+ ret = qmi_handle_init(&qmi->qmi_hdl,
+ WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ &ath10k_qmi_ops, qmi_msg_handler);
+ if (ret)
+ goto err;
+
+ qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event",
+ WQ_UNBOUND, 1);
+ if (!qmi->event_wq) {
+ ath10k_err(ar, "failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto err_release_qmi_handle;
+ }
+
+ INIT_LIST_HEAD(&qmi->event_list);
+ spin_lock_init(&qmi->event_lock);
+ INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01, 0);
+ if (ret)
+ goto err_qmi_lookup;
+
+ return 0;
+
+err_qmi_lookup:
+ destroy_workqueue(qmi->event_wq);
+
+err_release_qmi_handle:
+ qmi_handle_release(&qmi->qmi_hdl);
+
+err:
+ kfree(qmi);
+ return ret;
+}
+
+int ath10k_qmi_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+
+ qmi_handle_release(&qmi->qmi_hdl);
+ cancel_work_sync(&qmi->event_work);
+ destroy_workqueue(qmi->event_wq);
+ ar_snoc->qmi = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
new file mode 100644
index 000000000000..1efe1d22fc2f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _ATH10K_QMI_H_
+#define _ATH10K_QMI_H_
+
+#include <linux/soc/qcom/qmi.h>
+#include <linux/qrtr.h>
+#include "qmi_wlfw_v01.h"
+
+#define MAX_NUM_MEMORY_REGIONS 2
+#define MAX_TIMESTAMP_LEN 32
+#define MAX_BUILD_ID_LEN 128
+#define MAX_NUM_CAL_V01 5
+
+enum ath10k_qmi_driver_event_type {
+ ATH10K_QMI_EVENT_SERVER_ARRIVE,
+ ATH10K_QMI_EVENT_SERVER_EXIT,
+ ATH10K_QMI_EVENT_FW_READY_IND,
+ ATH10K_QMI_EVENT_FW_DOWN_IND,
+ ATH10K_QMI_EVENT_MSA_READY_IND,
+ ATH10K_QMI_EVENT_MAX,
+};
+
+struct ath10k_msa_mem_info {
+ phys_addr_t addr;
+ u32 size;
+ bool secure;
+};
+
+struct ath10k_qmi_chip_info {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct ath10k_qmi_board_info {
+ u32 board_id;
+};
+
+struct ath10k_qmi_soc_info {
+ u32 soc_id;
+};
+
+struct ath10k_qmi_cal_data {
+ u32 cal_id;
+ u32 total_size;
+ u8 *data;
+};
+
+struct ath10k_tgt_pipe_cfg {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ath10k_svc_pipe_cfg {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct ath10k_shadow_reg_cfg {
+ __le16 ce_id;
+ __le16 reg_offset;
+};
+
+struct ath10k_qmi_wlan_enable_cfg {
+ u32 num_ce_tgt_cfg;
+ struct ath10k_tgt_pipe_cfg *ce_tgt_cfg;
+ u32 num_ce_svc_pipe_cfg;
+ struct ath10k_svc_pipe_cfg *ce_svc_cfg;
+ u32 num_shadow_reg_cfg;
+ struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
+};
+
+struct ath10k_qmi_driver_event {
+ struct list_head list;
+ enum ath10k_qmi_driver_event_type type;
+ void *data;
+};
+
+struct ath10k_qmi {
+ struct ath10k *ar;
+ struct qmi_handle qmi_hdl;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ u32 nr_mem_region;
+ struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
+ dma_addr_t msa_pa;
+ u32 msa_mem_size;
+ void *msa_va;
+ struct ath10k_qmi_chip_info chip_info;
+ struct ath10k_qmi_board_info board_info;
+ struct ath10k_qmi_soc_info soc_info;
+ char fw_build_id[MAX_BUILD_ID_LEN + 1];
+ u32 fw_version;
+ bool fw_ready;
+ char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
+ struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
+};
+
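+/*
+ * Rough call flow (a sketch, not a contract): ath10k_qmi_init() registers a
+ * lookup for the wlfw service; after the server arrives and firmware ready
+ * is indicated, the bus code calls ath10k_qmi_wlan_enable() with the CE and
+ * shadow register configuration, and ath10k_qmi_wlan_disable() plus
+ * ath10k_qmi_deinit() on teardown.
+ */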
+int ath10k_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ enum wlfw_driver_mode_enum_v01 mode,
+ const char *version);
+int ath10k_qmi_wlan_disable(struct ath10k *ar);
+int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
+int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
+int ath10k_qmi_deinit(struct ath10k *ar);
+
+#endif /* _ATH10K_QMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
new file mode 100644
index 000000000000..ba79c2e4aed6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -0,0 +1,2072 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/soc/qcom/qmi.h>
+#include <linux/types.h>
+#include "qmi_wlfw_v01.h"
+
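+/*
+ * QMI element info tables: each entry describes how one field of the
+ * corresponding wlfw wire structure is encoded (TLV type, element size and
+ * count) and is consumed by the qcom QMI message encoder/decoder.
+ */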
+static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ region_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ secure_flag),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ secure_flag),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct wlfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg),
+ .ei_array = wlfw_mem_cfg_s_v01_ei,
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ type),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_soc_info_s_v01,
+ soc_id),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_SHADOW_REG_V2,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = wlfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = wlfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = wlfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = wlfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data_len),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_id),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+ total_size),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ seg_id),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ msa_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ size),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_MEM_REG_V01,
+ .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info),
+ .ei_array = wlfw_memory_region_info_s_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ data_len),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ data),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_vbatt_req_msg_v01,
+ voltage_uv),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_s_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ size),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01,
+ xo_cal_data),
+ },
+ {}
+};
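Every optional field in the tables above is encoded as a pair of entries sharing one tlv_type: a QMI_OPT_FLAG entry pointing at the *_valid byte, followed by the typed entry for the value itself, so the encoder emits (and the decoder accepts) the value TLV only when the flag is set. A minimal sketch of the same convention for a hypothetical message with a single optional u8 — the struct and array names here are illustrative, not part of this patch:

	struct example_resp_msg_v01 {
		u8 status_valid;	/* decoder sets this when TLV 0x10 arrives */
		u8 status;
	};

	static struct qmi_elem_info example_resp_msg_v01_ei[] = {
		{
			.data_type = QMI_OPT_FLAG,
			.elem_len = 1,
			.elem_size = sizeof(u8),
			.array_type = NO_ARRAY,
			.tlv_type = 0x10,
			.offset = offsetof(struct example_resp_msg_v01,
					   status_valid),
		},
		{
			.data_type = QMI_UNSIGNED_1_BYTE,
			.elem_len = 1,
			.elem_size = sizeof(u8),
			.array_type = NO_ARRAY,
			.tlv_type = 0x10,	/* same TLV id as its flag */
			.offset = offsetof(struct example_resp_msg_v01,
					   status),
		},
		{}
	};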
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
new file mode 100644
index 000000000000..c5e3870b8871
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WCN3990_QMI_SVC_V01_H
+#define WCN3990_QMI_SVC_V01_H
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_XO_CAL_IND_V01 0x003D
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_MEM_REG_V01 2
+#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 16
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_SHADOW_REG_V2 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+ QMI_WLFW_MISSION_V01 = 0,
+ QMI_WLFW_FTM_V01 = 1,
+ QMI_WLFW_EPPING_V01 = 2,
+ QMI_WLFW_WALTEST_V01 = 3,
+ QMI_WLFW_OFF_V01 = 4,
+ QMI_WLFW_CCPM_V01 = 5,
+ QMI_WLFW_QVIT_V01 = 6,
+ QMI_WLFW_CALIBRATION_V01 = 7,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+ QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+};
+
+enum wlfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+enum wlfw_mem_type_enum_v01 {
+ QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
+#define QMI_WLFW_MEM_READY_V01 ((u64)0x08ULL)
+#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_seg_s_v01 {
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct wlfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 mem_ready_enable_valid;
+ u8 mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+};
+
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50
+extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+ u8 pwr_pin_result_valid;
+ u32 pwr_pin_result;
+ u8 phy_io_pin_result_valid;
+ u32 phy_io_pin_result;
+ u8 rf_pin_result_valid;
+ u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+ enum wlfw_driver_mode_enum_v01 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_SHADOW_REG_V2];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct wlfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
+extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+ u32 meta_data_len;
+ enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+ u8 xo_cal_data_valid;
+ u8 xo_cal_data;
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
+extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+ u64 msa_addr;
+ u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 mem_region_info_len;
+ struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_MEM_REG_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+ u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+ u8 mac_addr_valid;
+ u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+ u8 daemon_support_valid;
+ u8 daemon_support;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
+extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
+extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+ u8 cause_for_rejuvenation_valid;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system_valid;
+ u8 requesting_sub_system;
+ u8 line_number_valid;
+ u16 line_number;
+ u8 function_name_valid;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+ u8 mask_valid;
+ u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 prev_mask_valid;
+ u64 prev_mask;
+ u8 curr_mask_valid;
+ u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+struct wlfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
+
+struct wlfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
+
+struct wlfw_xo_cal_ind_msg_v01 {
+ u8 xo_cal_data;
+};
+
+#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+
+#endif
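The _ei tables and *_MAX_MSG_LEN constants declared in this header are what the generic in-kernel QMI client (include/linux/soc/qcom/qmi.h) consumes when marshalling messages. A sketch of how the capability exchange might be driven with those helpers — the function name and the 5-second timeout are illustrative, and an initialized qmi_handle plus peer qrtr address are assumed:

	static int wlfw_fetch_cap(struct qmi_handle *qmi, struct sockaddr_qrtr *sq)
	{
		struct wlfw_cap_resp_msg_v01 *resp;
		struct wlfw_cap_req_msg_v01 req = {};
		struct qmi_txn txn;
		int ret;

		resp = kzalloc(sizeof(*resp), GFP_KERNEL);	/* too big for the stack */
		if (!resp)
			return -ENOMEM;

		ret = qmi_txn_init(qmi, &txn, wlfw_cap_resp_msg_v01_ei, resp);
		if (ret < 0)
			goto out;

		ret = qmi_send_request(qmi, sq, &txn,
				       QMI_WLFW_CAP_REQ_V01,
				       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
				       wlfw_cap_req_msg_v01_ei, &req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			goto out;
		}

		ret = qmi_txn_wait(&txn, 5 * HZ);
		if (ret < 0)
			goto out;

		if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
			ret = -EINVAL;
	out:
		kfree(resp);
		return ret;
	}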
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index f7b5b855aab2..8d3d9bca410f 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -67,6 +67,72 @@ static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ath10k_snoc_drv_priv drv_priv = {
.hw_rev = ATH10K_HW_WCN3990,
.dma_mask = DMA_BIT_MASK(37),
+ .msa_size = 0x100000,
+};
+
+#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
+#define WCN3990_DST_WR_IDX_OFFSET 0x40
+
+static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
+ {
+ .ce_id = __cpu_to_le16(0),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(3),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(4),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(5),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(7),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(1),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(2),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(7),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(8),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(9),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(10),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(11),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
};
static struct ce_attr host_ce_config_wlan[] = {
@@ -176,6 +242,128 @@ static struct ce_attr host_ce_config_wlan[] = {
},
};
+static struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host HTT (HIF->HTT) */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(1024),
+ .nbytes_max = __cpu_to_le32(64),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(4),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 Target to uMC */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 target autonomous qcache memcpy */
+ {
+ .pipenum = __cpu_to_le32(11),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
@@ -766,11 +954,47 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar)
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
- return 0;
+ struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
+ struct ath10k_qmi_wlan_enable_cfg cfg;
+ enum wlfw_driver_mode_enum_v01 mode;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
+ tgt_cfg[pipe_num].pipe_num =
+ target_ce_config_wlan[pipe_num].pipenum;
+ tgt_cfg[pipe_num].pipe_dir =
+ target_ce_config_wlan[pipe_num].pipedir;
+ tgt_cfg[pipe_num].nentries =
+ target_ce_config_wlan[pipe_num].nentries;
+ tgt_cfg[pipe_num].nbytes_max =
+ target_ce_config_wlan[pipe_num].nbytes_max;
+ tgt_cfg[pipe_num].flags =
+ target_ce_config_wlan[pipe_num].flags;
+ tgt_cfg[pipe_num].reserved = 0;
+ }
+
+ cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
+ sizeof(struct ath10k_tgt_pipe_cfg);
+ cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
+ &tgt_cfg;
+ cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
+ sizeof(struct ath10k_svc_pipe_cfg);
+ cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
+ &target_service_to_ce_map_wlan;
+ cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
+ sizeof(struct ath10k_shadow_reg_cfg);
+ cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
+ &target_shadow_reg_cfg_map;
+
+ mode = QMI_WLFW_MISSION_V01;
+
+ return ath10k_qmi_wlan_enable(ar, &cfg, mode,
+ NULL);
}
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
+ ath10k_qmi_wlan_disable(ar);
}
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
@@ -957,6 +1181,32 @@ out:
return ret;
}
+int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_bus_params bus_params;
+ int ret;
+
+ switch (type) {
+ case ATH10K_QMI_EVENT_FW_READY_IND:
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.chip_id = ar_snoc->target_info.soc_version;
+ ret = ath10k_core_register(ar, &bus_params);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n",
+ ret);
+ }
+ break;
+ case ATH10K_QMI_EVENT_FW_DOWN_IND:
+ break;
+ default:
+ ath10k_err(ar, "invalid fw indication: %llx\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1281,9 +1531,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
struct ath10k_snoc *ar_snoc;
struct device *dev;
struct ath10k *ar;
+ u32 msa_size;
int ret;
u32 i;
- struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
if (!of_id) {
@@ -1313,6 +1563,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar_snoc->ar = ar;
ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
ar->ce_priv = &ar_snoc->ce;
+ msa_size = drv_data->msa_size;
ret = ath10k_snoc_resource_init(ar);
if (ret) {
@@ -1351,12 +1602,10 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
- bus_params.dev_type = ATH10K_DEV_TYPE_LL;
- bus_params.chip_id = drv_data->hw_rev;
- ret = ath10k_core_register(ar, &bus_params);
+ ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
- ath10k_err(ar, "failed to register driver core: %d\n", ret);
- goto err_hw_power_off;
+ ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
+ goto err_core_destroy;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
@@ -1364,9 +1613,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
return 0;
-err_hw_power_off:
- ath10k_hw_power_off(ar);
-
err_free_irq:
ath10k_snoc_free_irq(ar);
@@ -1388,6 +1634,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_hw_power_off(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
+ ath10k_qmi_deinit(ar);
ath10k_core_destroy(ar);
return 0;
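Note that ath10k_snoc_wlan_enable() derives each element count by dividing the array size by the size of the destination structure rather than the array's own element type; this is only correct because struct ce_pipe_config and struct ath10k_tgt_pipe_cfg (and likewise the service-map and shadow-register pairs) are laid out identically. Under that same layout assumption, the conventional ARRAY_SIZE() idiom would be equivalent and harder to get out of sync:

	cfg.num_ce_tgt_cfg = ARRAY_SIZE(target_ce_config_wlan);
	cfg.num_ce_svc_pipe_cfg = ARRAY_SIZE(target_service_to_ce_map_wlan);
	cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);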
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index f9e530189d48..e1d2d6675556 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -19,10 +19,12 @@
#include "hw.h"
#include "ce.h"
+#include "qmi.h"
struct ath10k_snoc_drv_priv {
enum ath10k_hw_rev hw_rev;
u64 dma_mask;
+ u32 msa_size;
};
struct snoc_state {
@@ -81,6 +83,7 @@ struct ath10k_snoc {
struct timer_list rx_post_retry;
struct ath10k_wcn3990_vreg_info *vreg;
struct ath10k_wcn3990_clk_info *clk;
+ struct ath10k_qmi *qmi;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -90,5 +93,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
+int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 7fd63bbf8e24..7978a7783f90 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -210,6 +210,9 @@ struct wmi_ops {
u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
+ struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan);
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
@@ -1361,6 +1364,24 @@ ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
}
static inline int
+ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_config_pno)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
{
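ath10k_wmi_wow_config_pno() follows the usual wmi-ops dispatch pattern: a backend that does not provide gen_wow_config_pno yields -EOPNOTSUPP, so callers may invoke the helper unconditionally and treat that error as "no PNO support". A sketch of a stop request from a suspend path — the caller is hypothetical, and it assumes wmi_pno_scan_req carries an enable flag, consistent with the start/stop split in the TLV backend that follows:

	static int ath10k_example_pno_stop(struct ath10k *ar, u32 vdev_id)
	{
		struct wmi_pno_scan_req pno = {};
		int ret;

		pno.enable = 0;		/* assumed field: zero requests a stop */
		pno.vdev_id = vdev_id;

		ret = ath10k_wmi_wow_config_pno(ar, vdev_id, &pno);
		if (ret == -EOPNOTSUPP)
			return 0;	/* backend has no PNO; nothing to stop */

		return ret;
	}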
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 731ceaed4d5a..bab8b2527fb8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3441,6 +3441,192 @@ ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
return skb;
}
+/* Request FW to start PNO operation */
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_tlv_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ __le32 *channel_list;
+ u16 tlv_len;
+ size_t len;
+ void *ptr;
+ u32 i;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV placeholder for array of structures
+ * nlo_configured_parameters(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV placeholder for array of uint32 channel_list */
+
+ len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
+ WMI_NLO_MAX_CHAN);
+ len += sizeof(struct nlo_configured_parameters) *
+ min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* wmi_tlv_wow_nlo_config_cmd parameters */
+ cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
+ cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ /* copy scan interval */
+ cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
+ WMI_NLO_MAX_SSIDS));
+ tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
+ sizeof(struct nlo_configured_parameters);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(tlv_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
+ sizeof(*tlv));
+
+ /* copy the SSID and its length */
+ nlo_list[i].ssid.valid = __cpu_to_le32(true);
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ /* copy rssi threshold */
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
+ nlo_list[i].rssi_cond.rssi =
+ __cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ __cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
+
+ /* copy channel info */
+ cmd->num_of_channels = __cpu_to_le32(min_t(u8,
+ pno->a_networks[0].channel_count,
+ WMI_NLO_MAX_CHAN));
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
+ sizeof(__le32));
+ ptr += sizeof(*tlv);
+
+ channel_list = (__le32 *)ptr;
+ for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
+ channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+/* Request FW to stop ongoing PNO operation */
+static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
+ u32 vdev_id)
+{
+ struct wmi_tlv_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV placeholder for array of structures
+ * nlo_configured_parameters(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV placeholder for array of uint32 channel_list */
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* channel list */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ if (pno_scan->enable)
+ return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
{
@@ -3973,6 +4159,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+ .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
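
For readers following the buffer arithmetic in ath10k_wmi_tlv_op_gen_config_pno_start() above: the command is laid out as three consecutive TLVs, the fixed config struct, an ARRAY_STRUCT holding one nlo_configured_parameters entry per SSID, and an ARRAY_UINT32 holding the channel list. A hedged sketch of the resulting length computation (the helper name is invented for illustration):

/* Illustrative only - mirrors the layout built by the patch:
 *   [tlv][wmi_tlv_wow_nlo_config_cmd]
 *   [tlv][nlo_configured_parameters x n_ssids]
 *   [tlv][__le32 channel x n_channels]
 */
static size_t example_pno_buf_len(u8 n_ssids, u8 n_channels)
{
	return sizeof(struct wmi_tlv) +
	       sizeof(struct wmi_tlv_wow_nlo_config_cmd) +
	       sizeof(struct wmi_tlv) +
	       n_ssids * sizeof(struct nlo_configured_parameters) +
	       sizeof(struct wmi_tlv) +
	       n_channels * sizeof(__le32);
}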
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 4f0c20c90642..92c25f51bf86 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -2146,6 +2146,260 @@ struct wmi_tlv_tdls_peer_event {
void ath10k_wmi_tlv_attach(struct ath10k *ar);
+enum wmi_nlo_auth_algorithm {
+ WMI_NLO_AUTH_ALGO_80211_OPEN = 1,
+ WMI_NLO_AUTH_ALGO_80211_SHARED_KEY = 2,
+ WMI_NLO_AUTH_ALGO_WPA = 3,
+ WMI_NLO_AUTH_ALGO_WPA_PSK = 4,
+ WMI_NLO_AUTH_ALGO_WPA_NONE = 5,
+ WMI_NLO_AUTH_ALGO_RSNA = 6,
+ WMI_NLO_AUTH_ALGO_RSNA_PSK = 7,
+};
+
+enum wmi_nlo_cipher_algorithm {
+ WMI_NLO_CIPHER_ALGO_NONE = 0x00,
+ WMI_NLO_CIPHER_ALGO_WEP40 = 0x01,
+ WMI_NLO_CIPHER_ALGO_TKIP = 0x02,
+ WMI_NLO_CIPHER_ALGO_CCMP = 0x04,
+ WMI_NLO_CIPHER_ALGO_WEP104 = 0x05,
+ WMI_NLO_CIPHER_ALGO_BIP = 0x06,
+ WMI_NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100,
+ WMI_NLO_CIPHER_ALGO_WEP = 0x101,
+};
+
+/* SSID broadcast type passed in NLO params */
+enum wmi_nlo_ssid_bcastnwtype {
+ WMI_NLO_BCAST_UNKNOWN = 0,
+ WMI_NLO_BCAST_NORMAL = 1,
+ WMI_NLO_BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP (0x1 << 0)
+#define WMI_NLO_CONFIG_START (0x1 << 1)
+#define WMI_NLO_CONFIG_RESET (0x1 << 2)
+#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4)
+#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO (0x1 << 7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8)
+#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13)
+
+/* Whether directed scan needs to be performed (for hidden SSIDs) */
+#define WMI_ENLO_FLAG_DIRECTED_SCAN 1
+
+/* Whether PNO event shall be triggered if the network is found on A band */
+#define WMI_ENLO_FLAG_A_BAND 2
+
+/* Whether PNO event shall be triggered if the network is found on G band */
+#define WMI_ENLO_FLAG_G_BAND 4
+
+/* Whether strict matching is required (i.e. the firmware shall only
+ * match on the entire SSID)
+ */
+#define WMI_ENLO_FLAG_STRICT_MATCH 8
+
+/* Code for matching the beacon AUTH IE - additional codes TBD */
+/* open */
+#define WMI_ENLO_AUTH_CODE_OPEN 1
+
+/* WPA_PSK or WPA2PSK */
+#define WMI_ENLO_AUTH_CODE_PSK 2
+
+/* any EAPOL */
+#define WMI_ENLO_AUTH_CODE_EAPOL 4
+
+struct wmi_nlo_ssid_param {
+ __le32 valid;
+ struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+ __le32 valid;
+ __le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+ __le32 valid;
+ __le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+ __le32 valid;
+
+ /* If WMI_NLO_CONFIG_ENLO is not set, supplicant PNO is enabled and
+ * this value is a plain true/false. Otherwise EPNO is enabled and
+ * bcast_nw_type is used as a bit flag containing WMI_ENLO_FLAG_XXX values.
+ */
+ __le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+ __le32 valid;
+ __le32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+ /* TLV tag and len;*/
+ __le32 tlv_header;
+ struct wmi_nlo_ssid_param ssid;
+ struct wmi_nlo_enc_param enc_type;
+ struct wmi_nlo_auth_param auth_type;
+ struct wmi_nlo_rssi_param rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+/* Support channel prediction for PNO scan after scanning top_k_num channels
+ * if stationary_threshold is met.
+ */
+struct nlo_channel_prediction_cfg {
+ __le32 tlv_header;
+
+ /* Enable or disable this feature. */
+ __le32 enable;
+
+ /* Top K channels will be scanned before deciding whether to further scan
+ * or stop. Minimum value is 3 and maximum is 5.
+ */
+ __le32 top_k_num;
+
+ /* Preconfigured stationary threshold.
+ * A lower value is more conservative, a higher value more aggressive.
+ * Maximum is 100 and minimum is 0.
+ */
+ __le32 stationary_threshold;
+
+ /* Period of the full channel scan, in milliseconds.
+ * Once full_scan_period_ms has elapsed since the last full scan,
+ * channel prediction is suppressed and a full scan is performed.
+ * This helps detect a sudden AP power-on or power-off. A value of 0
+ * means no full scan at all (not recommended).
+ */
+ __le32 full_scan_period_ms;
+} __packed;
+
+struct enlo_candidate_score_params_t {
+ __le32 tlv_header; /* TLV tag and len; */
+
+ /* minimum 5GHz RSSI for a BSSID to be considered (units = dBm) */
+ __le32 min_5ghz_rssi;
+
+ /* minimum 2.4GHz RSSI for a BSSID to be considered (units = dBm) */
+ __le32 min_24ghz_rssi;
+
+ /* the maximum score that a network can have before bonuses */
+ __le32 initial_score_max;
+
+ /* current_connection_bonus:
+ * only report when there is a network's score this much higher
+ * than the current connection
+ */
+ __le32 current_connection_bonus;
+
+ /* score bonus for all networks with the same network flag */
+ __le32 same_network_bonus;
+
+ /* score bonus for networks that are not open */
+ __le32 secure_bonus;
+
+ /* 5GHz RSSI score bonus (applied to all 5GHz networks) */
+ __le32 band_5ghz_bonus;
+} __packed;
+
+struct connected_nlo_bss_band_rssi_pref_t {
+ __le32 tlv_header; /* TLV tag and len;*/
+
+ /* band which needs to get preference over other band
+ * - see wmi_set_vdev_ie_band enum
+ */
+ __le32 band;
+
+ /* Amount of RSSI preference (in dB) that can be given to a band */
+ __le32 rssi_pref;
+} __packed;
+
+struct connected_nlo_rssi_params_t {
+ __le32 tlv_header; /* TLV tag and len;*/
+
+ /* Relative rssi threshold (in dB) by which new BSS should have
+ * better rssi than the current connected BSS.
+ */
+ __le32 relative_rssi;
+
+ /* The amount of rssi preference (in dB) that can be given
+ * to a 5G BSS over 2.4G BSS.
+ */
+ __le32 relative_rssi_5g_pref;
+} __packed;
+
+struct wmi_tlv_wow_nlo_config_cmd {
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 fast_scan_max_cycles;
+ __le32 active_dwell_time;
+ __le32 passive_dwell_time; /* PDT in msecs */
+ __le32 probe_bundle_size;
+
+ /* ART = IRT */
+ __le32 rest_time;
+
+ /* Max value that can be reached after SBM */
+ __le32 max_rest_time;
+
+ /* SBM */
+ __le32 scan_backoff_multiplier;
+
+ /* SCBM */
+ __le32 fast_scan_period;
+
+ /* specific to windows */
+ __le32 slow_scan_period;
+
+ __le32 no_of_ssids;
+
+ __le32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ __le32 delay_start_time;
+
+ /** MAC Address to use in Probe Req as SA **/
+ struct wmi_mac_addr mac_addr;
+
+ /** Mask on which MAC has to be randomized **/
+ struct wmi_mac_addr mac_mask;
+
+ /** IE bitmap to use in Probe Req **/
+ __le32 ie_bitmap[8];
+
+ /** Number of vendor OUIs. In the TLV vendor_oui[] **/
+ __le32 num_vendor_oui;
+
+ /** Number of connected NLO band preferences **/
+ __le32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_parameters nlo_list[];
+ * A_UINT32 channel_list[num_of_channels];
+ * nlo_channel_prediction_cfg ch_prediction_cfg;
+ * enlo_candidate_score_params candidate_score_params;
+ * wmi_vendor_oui vendor_oui[num_vendor_oui];
+ * connected_nlo_rssi_params cnlo_rssi_params;
+ * connected_nlo_bss_band_rssi_pref cnlo_bss_band_rssi_pref[num_cnlo_band_pref];
+ */
+} __packed;
+
struct wmi_tlv_mgmt_tx_cmd {
__le32 vdev_id;
__le32 desc_id;
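
The trailing-TLV comment at the end of wmi_tlv_wow_nlo_config_cmd is the key to parsing these buffers: the fixed struct is followed by the variable-length arrays in the listed order. As a hedged illustration (the helper name is invented), stepping from the command body to the channel list looks like:

/* Hypothetical helper: skip the nlo_list ARRAY_STRUCT TLV that
 * directly follows the fixed command, landing on the channel array.
 * Assumes a buffer built as in ath10k_wmi_tlv_op_gen_config_pno_start().
 */
static __le32 *example_find_channel_list(struct wmi_tlv_wow_nlo_config_cmd *cmd)
{
	void *ptr = (void *)cmd + sizeof(*cmd);
	struct wmi_tlv *tlv = ptr;		/* ARRAY_STRUCT header */

	/* skip the nlo_list array, landing on the ARRAY_UINT32 header */
	ptr += sizeof(*tlv) + __le16_to_cpu(tlv->len);

	/* channel values start right after that header */
	return (__le32 *)(ptr + sizeof(*tlv));
}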
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f67c52757ea6..f7badd079051 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -7068,6 +7068,63 @@ struct wmi_pdev_set_adaptive_cca_params {
__le32 cca_detect_margin;
} __packed;
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* Size based on the dot11 declaration, without extra IEs, as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24G_DEFAULT_CH 1
+#define WMI_PNO_5G_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_SSID_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+struct wmi_network_type {
+ struct wmi_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+} __packed;
+
+struct wmi_pno_scan_req {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+} __packed;
+
enum wmi_host_platform_type {
WMI_HOST_PLATFORM_HIGH_PERF,
WMI_HOST_PLATFORM_LOW_PERF,
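
wmi_pno_scan_req is a host-order staging structure; only the ssid_len field inside wmi_ssid is stored little-endian, as the conversion code in wow.c below shows. A minimal hypothetical fill for a single network (all values invented, illustration only):

/* Illustrative only - not part of the patch. */
static void example_fill_pno(struct wmi_pno_scan_req *pno, u32 vdev_id)
{
	static const u8 ssid[] = "example-ssid";

	memset(pno, 0, sizeof(*pno));
	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = 1;
	pno->a_networks[0].ssid.ssid_len = __cpu_to_le32(sizeof(ssid) - 1);
	memcpy(pno->a_networks[0].ssid.ssid, ssid, sizeof(ssid) - 1);
	pno->a_networks[0].channel_count = 1;
	pno->a_networks[0].channels[0] = 2412;	/* channel 1, in MHz */
	pno->fast_scan_period = 30 * MSEC_PER_SEC;
	pno->slow_scan_period = 120 * MSEC_PER_SEC;
	pno->fast_scan_max_cycles = 1;
}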
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index af444dfecae9..51b26b305885 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -180,6 +180,100 @@ static void ath10k_wow_convert_8023_to_80211
}
}
+static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req *pno)
+{
+ int i, j, ret = 0;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Filling per profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ j = 0;
+ while (j < pno->uc_networks_count) {
+ if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
+ nd_config->ssids[i].ssid_len &&
+ (memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ __le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ j++;
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath10k_warn(ar, "Invalid number of scan plans %d !!",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+ return ret;
+}
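
The scan-plan mapping above is easiest to check with concrete numbers. A worked example under an assumed request (values invented): two plans, plan[0] = { .interval = 10, .iterations = 3 } and plan[1] = { .interval = 60 }, yield

	pno->fast_scan_period     = 10 * MSEC_PER_SEC;	/* 10000 ms */
	pno->fast_scan_max_cycles = 3;
	pno->slow_scan_period     = 60 * MSEC_PER_SEC;	/* 60000 ms */

i.e. three fast cycles ten seconds apart, after which the firmware falls back to one scan per minute.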
+
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
@@ -213,6 +307,26 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
if (wowlan->magic_pkt)
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ar->nlo_enabled = true;
+
+ ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
break;
default:
break;
@@ -299,6 +413,51 @@ static int ath10k_wow_set_wakeups(struct ath10k *ar,
return 0;
}
+static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
+{
+ int ret = 0;
+ struct ath10k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (ar->nlo_enabled) {
+ struct wmi_pno_scan_req *pno;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ar->nlo_enabled = false;
+ ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_clean_nlo(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int ath10k_wow_enable(struct ath10k *ar)
{
int ret;
@@ -436,6 +595,10 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
if (ret)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+ ret = ath10k_wow_nlo_cleanup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);
+
exit:
if (ret) {
switch (ar->state) {
@@ -475,6 +638,11 @@ int ath10k_wow_init(struct ath10k *ar)
ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
}
+ if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index a3668433dc02..988222cea9df 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -755,11 +755,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
}
if (main_ant_conf == rx_ant_conf) {
- ANT_STAT_INC(ANT_MAIN, recv_cnt);
- ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
+ ANT_STAT_INC(sc, ANT_MAIN, recv_cnt);
+ ANT_LNA_INC(sc, ANT_MAIN, rx_ant_conf);
} else {
- ANT_STAT_INC(ANT_ALT, recv_cnt);
- ANT_LNA_INC(ANT_ALT, rx_ant_conf);
+ ANT_STAT_INC(sc, ANT_ALT, recv_cnt);
+ ANT_LNA_INC(sc, ANT_ALT, rx_ant_conf);
}
/* Short scan check */
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 6a43d26276e5..6aa3ec024ffa 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -624,9 +624,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
tsf, freq, chan_type);
if (ret == 0)
- RX_STAT_INC(rx_spectral_sample_good);
+ RX_STAT_INC(sc, rx_spectral_sample_good);
else
- RX_STAT_INC(rx_spectral_sample_err);
+ RX_STAT_INC(sc, rx_spectral_sample_err);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
@@ -642,9 +642,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
tsf, freq, chan_type);
if (ret == 0)
- RX_STAT_INC(rx_spectral_sample_good);
+ RX_STAT_INC(sc, rx_spectral_sample_good);
else
- RX_STAT_INC(rx_spectral_sample_err);
+ RX_STAT_INC(sc, rx_spectral_sample_err);
/* Mix the received bins to the /dev/random
* pool
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c871b7ec5011..4399e9ad058f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -785,35 +785,35 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
{
int qnum = txq->axq_qnum;
- TX_STAT_INC(qnum, tx_pkts_all);
+ TX_STAT_INC(sc, qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
if (flags & ATH_TX_ERROR)
- TX_STAT_INC(qnum, a_xretries);
+ TX_STAT_INC(sc, qnum, a_xretries);
else
- TX_STAT_INC(qnum, a_completed);
+ TX_STAT_INC(sc, qnum, a_completed);
} else {
if (ts->ts_status & ATH9K_TXERR_XRETRY)
- TX_STAT_INC(qnum, xretries);
+ TX_STAT_INC(sc, qnum, xretries);
else
- TX_STAT_INC(qnum, completed);
+ TX_STAT_INC(sc, qnum, completed);
}
if (ts->ts_status & ATH9K_TXERR_FILT)
- TX_STAT_INC(qnum, txerr_filtered);
+ TX_STAT_INC(sc, qnum, txerr_filtered);
if (ts->ts_status & ATH9K_TXERR_FIFO)
- TX_STAT_INC(qnum, fifo_underrun);
+ TX_STAT_INC(sc, qnum, fifo_underrun);
if (ts->ts_status & ATH9K_TXERR_XTXOP)
- TX_STAT_INC(qnum, xtxop);
+ TX_STAT_INC(sc, qnum, xtxop);
if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
- TX_STAT_INC(qnum, timer_exp);
+ TX_STAT_INC(sc, qnum, timer_exp);
if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
- TX_STAT_INC(qnum, desc_cfg_err);
+ TX_STAT_INC(sc, qnum, desc_cfg_err);
if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
- TX_STAT_INC(qnum, data_underrun);
+ TX_STAT_INC(sc, qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
- TX_STAT_INC(qnum, delim_underrun);
+ TX_STAT_INC(sc, qnum, delim_underrun);
}
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 249f8141cd00..79607db14387 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -25,17 +25,17 @@ struct ath_buf;
struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
-#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
-#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
-#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
-#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
-#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
+#define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0)
+#define RX_STAT_INC(sc, c) do { (sc)->debug.stats.rxstats.c++; } while (0)
+#define RESET_STAT_INC(sc, type) do { (sc)->debug.stats.reset[type]++; } while (0)
+#define ANT_STAT_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].c++; } while (0)
+#define ANT_LNA_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].lna_recv_cnt[c]++; } while (0)
#else
-#define TX_STAT_INC(q, c) do { } while (0)
-#define RX_STAT_INC(c)
-#define RESET_STAT_INC(sc, type) do { } while (0)
-#define ANT_STAT_INC(i, c) do { } while (0)
-#define ANT_LNA_INC(i, c) do { } while (0)
+#define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0)
+#define RX_STAT_INC(sc, c) do { (void)(sc); } while (0)
+#define RESET_STAT_INC(sc, type) do { (void)(sc); } while (0)
+#define ANT_STAT_INC(sc, i, c) do { (void)(sc); } while (0)
+#define ANT_LNA_INC(sc, i, c) do { (void)(sc); } while (0)
#endif
enum ath_reset_type {
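
The rewritten stubs deliberately evaluate (void)(sc) so that builds without CONFIG_ATH9K_DEBUGFS still "use" the argument, silencing unused-variable warnings at call sites where sc exists only to feed the statistics macros. A standalone sketch of the pattern (names invented, not ath9k code):

/* Illustration only. */
#ifdef EXAMPLE_DEBUG
#define EXAMPLE_STAT_INC(sc, c) do { (sc)->stats.c++; } while (0)
#else
/* Evaluate sc so callers do not trip -Wunused-but-set-variable. */
#define EXAMPLE_STAT_INC(sc, c) do { (void)(sc); } while (0)
#endif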
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c85f613e8ceb..1e3b5f4a4cf9 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -809,7 +809,7 @@ static void ath9k_tx(struct ieee80211_hw *hw,
if (ath_tx_start(hw, skb, &txctl) != 0) {
ath_dbg(common, XMIT, "TX failed\n");
- TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
+ TX_STAT_INC(sc, txctl.txq->axq_qnum, txfailed);
goto exit;
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a8ac42c96d71..30d1bd832d90 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -829,7 +829,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* Discard zero-length packets and packets smaller than an ACK
*/
if (rx_stats->rs_datalen < 10) {
- RX_STAT_INC(rx_len_err);
+ RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
@@ -839,7 +839,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* those frames.
*/
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
- RX_STAT_INC(rx_len_err);
+ RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
@@ -880,7 +880,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
} else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
rx_status->mactime)) {
- RX_STAT_INC(rx_spectral);
+ RX_STAT_INC(sc, rx_spectral);
}
return -EINVAL;
}
@@ -898,7 +898,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
spin_unlock_bh(&sc->chan_lock);
if (ath_is_mybeacon(common, hdr)) {
- RX_STAT_INC(rx_beacons);
+ RX_STAT_INC(sc, rx_beacons);
rx_stats->is_mybeacon = true;
}
@@ -915,7 +915,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
*/
ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
- RX_STAT_INC(rx_rate_err);
+ RX_STAT_INC(sc, rx_rate_err);
return -EINVAL;
}
@@ -1136,7 +1136,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* skb and put it at the tail of the sc->rx.rxbuf list for
* processing. */
if (!requeue_skb) {
- RX_STAT_INC(rx_oom_err);
+ RX_STAT_INC(sc, rx_oom_err);
goto requeue_drop_frag;
}
@@ -1164,7 +1164,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
rxs, decrypt_error);
if (rs.rs_more) {
- RX_STAT_INC(rx_frags);
+ RX_STAT_INC(sc, rx_frags);
/*
* rs_more indicates chained descriptors which can be
* used to link buffers together for a sort of
@@ -1174,7 +1174,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
/* too many fragments - cannot handle frame */
dev_kfree_skb_any(sc->rx.frag);
dev_kfree_skb_any(skb);
- RX_STAT_INC(rx_too_many_frags_err);
+ RX_STAT_INC(sc, rx_too_many_frags_err);
skb = NULL;
}
sc->rx.frag = skb;
@@ -1186,7 +1186,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
dev_kfree_skb(skb);
- RX_STAT_INC(rx_oom_err);
+ RX_STAT_INC(sc, rx_oom_err);
goto requeue_drop_frag;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 43b6c8508e49..25b3fc82d4ac 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -391,7 +391,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_hdr *hdr;
int prev = fi->retries;
- TX_STAT_INC(txq->axq_qnum, a_retries);
+ TX_STAT_INC(sc, txq->axq_qnum, a_retries);
fi->retries += count;
if (prev > 0)
@@ -1105,7 +1105,7 @@ finish:
al = get_frame_info(bf->bf_mpdu)->framelen;
bf->bf_state.bf_type = BUF_AMPDU;
} else {
- TX_STAT_INC(txq->axq_qnum, a_aggr);
+ TX_STAT_INC(sc, txq->axq_qnum, a_aggr);
}
return al;
@@ -1727,7 +1727,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
bf_tail = bf;
nframes--;
sent++;
- TX_STAT_INC(txq->axq_qnum, a_queued_hw);
+ TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
if (an->sta && skb_queue_empty(&tid->retry_q))
ieee80211_sta_set_buffered(an->sta, i, false);
@@ -2110,14 +2110,14 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
if (puttxbuf) {
- TX_STAT_INC(txq->axq_qnum, puttxbuf);
+ TX_STAT_INC(sc, txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
if (!edma || sc->tx99_state) {
- TX_STAT_INC(txq->axq_qnum, txstart);
+ TX_STAT_INC(sc, txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
@@ -2154,7 +2154,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
- TX_STAT_INC(txq->axq_qnum, queued);
+ TX_STAT_INC(sc, txq->axq_qnum, queued);
}
static void setup_frame_info(struct ieee80211_hw *hw,
@@ -2486,7 +2486,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
- TX_STAT_INC(txctl.txq->axq_qnum, queued);
+ TX_STAT_INC(sc, txctl.txq->axq_qnum, queued);
ath_txq_unlock(sc, txctl.txq);
}
@@ -2699,7 +2699,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (status == -EINPROGRESS)
break;
- TX_STAT_INC(txq->axq_qnum, txprocdesc);
+ TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
/*
* Remove ath_buf's of the same transmit unit from txq,
@@ -2778,7 +2778,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_txq_lock(sc, txq);
- TX_STAT_INC(txq->axq_qnum, txprocdesc);
+ TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
fifo_list = &txq->txq_fifo[txq->txq_tailidx];
if (list_empty(fifo_list)) {
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 66ffae2de86e..aa50813a0595 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -416,8 +416,8 @@ static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
- wil_debugfs_iomem_x32_set, "0x%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
umode_t mode,
@@ -432,7 +432,8 @@ static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
data->wil = wil;
data->offset = value;
- file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+ file = debugfs_create_file_unsafe(name, mode, parent, data,
+ &fops_iomem_x32);
if (!IS_ERR_OR_NULL(file))
wil->dbg_data.iomem_data_count++;
@@ -451,14 +452,15 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
- wil_debugfs_ulong_set, "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
+ wil_debugfs_ulong_set, "0x%llx\n");
static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent,
ulong *value)
{
- return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong);
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ &wil_fops_ulong);
}
/**
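
The conversion pairs DEFINE_DEBUGFS_ATTRIBUTE() with debugfs_create_file_unsafe(): the attribute wrapper protects its get/set callbacks with debugfs_file_get()/debugfs_file_put(), so the file no longer needs the heavier full-proxy protection that plain debugfs_create_file() applies. A minimal hedged sketch of the pattern (names invented):

/* Illustration only. */
static int example_get(void *data, u64 *val)
{
	*val = *(unsigned long *)data;
	return 0;
}

static int example_set(void *data, u64 val)
{
	*(unsigned long *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "0x%llx\n");

/* ...then: debugfs_create_file_unsafe("example", 0600, parent, &value,
 *                                     &example_fops);
 */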
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 6255fb6d97a7..81ff558046a8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_bh(&wl->lock);
+ wl->wlc->vif = vif;
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
+ struct brcms_info *wl = hw->priv;
+
+ spin_lock_bh(&wl->lock);
+ wl->wlc->vif = NULL;
+ spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
spin_unlock_bh(&wl->lock);
}
+static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct brcms_info *wl = hw->priv;
+ struct sk_buff *beacon = NULL;
+ u16 tim_offset = 0;
+
+ spin_lock_bh(&wl->lock);
+ if (wl->wlc->vif)
+ beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
+ &tim_offset, NULL);
+ if (beacon)
+ brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
+ wl->wlc->vif->bss_conf.dtim_period);
+ spin_unlock_bh(&wl->lock);
+
+ return 0;
+}
+
static const struct ieee80211_ops brcms_ops = {
.tx = brcms_ops_tx,
.start = brcms_ops_start,
@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
.get_tsf = brcms_ops_get_tsf,
.set_tsf = brcms_ops_set_tsf,
+ .set_tim = brcms_ops_beacon_set_tim,
};
void brcms_dpc(unsigned long data)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
index c4d135cff04a..9f76b880814e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
@@ -563,6 +563,7 @@ struct brcms_c_info {
struct wiphy *wiphy;
struct scb pri_scb;
+ struct ieee80211_vif *vif;
struct sk_buff *beacon;
u16 beacon_tim_offset;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index c3c638ed0ed7..ce4144a89217 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1297,6 +1297,8 @@ il4965_send_rxon_assoc(struct il_priv *il)
const struct il_rxon_cmd *rxon1 = &il->staging;
const struct il_rxon_cmd *rxon2 = &il->active;
+ lockdep_assert_held(&il->mutex);
+
if (rxon1->flags == rxon2->flags &&
rxon1->filter_flags == rxon2->filter_flags &&
rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index f44c716b1130..c16757051f16 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1154,14 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
-void iwl_fw_error_dump_wk(struct work_struct *work)
+/* this function assumes dump_start was called beforehand and dump_end will be
+ * called afterwards
+ */
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
{
- struct iwl_fw_runtime *fwrt =
- container_of(work, struct iwl_fw_runtime, dump.wk.work);
struct iwl_fw_dbg_params params = {0};
- if (fwrt->ops && fwrt->ops->dump_start &&
- fwrt->ops->dump_start(fwrt->ops_ctx))
+ if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
@@ -1169,7 +1169,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
- goto out;
+ return;
}
iwl_fw_dbg_stop_recording(fwrt, &params);
@@ -1183,7 +1183,20 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
udelay(500);
iwl_fw_dbg_restart_recording(fwrt, &params);
}
-out:
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
+
+void iwl_fw_error_dump_wk(struct work_struct *work)
+{
+ struct iwl_fw_runtime *fwrt =
+ container_of(work, struct iwl_fw_runtime, dump.wk.work);
+
+ if (fwrt->ops && fwrt->ops->dump_start &&
+ fwrt->ops->dump_start(fwrt->ops_ctx))
+ return;
+
+ iwl_fw_dbg_collect_sync(fwrt);
+
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
}
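
The refactor splits the work item into a reusable synchronous collector. iwl_fw_error_dump_wk() keeps the dump_start/dump_end bracketing for the asynchronous path, while contexts that already hold the op-mode mutex (see the iwl_mvm_stop_device() hunk further down in this diff) call iwl_fw_dbg_collect_sync() directly. A hedged summary of the two calling conventions:

/* Illustration only.
 * Async path (worker): dump_start -> iwl_fw_dbg_collect_sync -> dump_end.
 * Sync path (mutex already held): iwl_fw_dbg_collect_sync(fwrt) alone,
 * precisely because dump_start would try to take the same lock again.
 */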
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index d9578dcec24c..6f8d3256f7b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -367,4 +367,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 2cc6c019d0e1..420e6d745f77 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -30,38 +30,20 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_data
-TRACE_EVENT(iwlwifi_dev_tx_data,
- TP_PROTO(const struct device *dev,
- struct sk_buff *skb, u8 hdr_len),
- TP_ARGS(dev, skb, hdr_len),
+TRACE_EVENT(iwlwifi_dev_tx_tb,
+ TP_PROTO(const struct device *dev, struct sk_buff *skb,
+ u8 *data_src, size_t data_len),
+ TP_ARGS(dev, skb, data_src, data_len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, data,
- iwl_trace_data(skb) ? skb->len - hdr_len : 0)
+ iwl_trace_data(skb) ? data_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
- skb_copy_bits(skb, hdr_len,
- __get_dynamic_array(data),
- skb->len - hdr_len);
- ),
- TP_printk("[%s] TX frame data", __get_str(dev))
-);
-
-TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
- TP_PROTO(const struct device *dev,
- u8 *data_src, size_t data_len),
- TP_ARGS(dev, data_src, data_len),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __dynamic_array(u8, data, data_len)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- memcpy(__get_dynamic_array(data), data_src, data_len);
+ memcpy(__get_dynamic_array(data), data_src, data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 210be26aadaa..843f3b41b72e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
{
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
struct wowlan_key_data key_data = {
- .configure_keys = !d0i3,
+ .configure_keys = !d0i3 && !unified,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
@@ -1636,32 +1638,10 @@ out_free_resp:
}
static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
{
- u32 base = mvm->error_event_table[0];
- struct error_table_start {
- /* cf. struct iwl_error_event_table */
- u32 valid;
- u32 error_id;
- } err_info;
int ret;
- iwl_trans_read_mem_bytes(mvm->trans, base,
- &err_info, sizeof(err_info));
-
- if (err_info.valid) {
- IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
- err_info.valid, err_info.error_id);
- if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
- }
- return ERR_PTR(-EIO);
- }
-
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
if (ret)
@@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ fw_status = iwl_mvm_get_wakeup_status(mvm);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
@@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
u32 reasons = 0;
int i, j, n_matches, ret;
- fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ fw_status = iwl_mvm_get_wakeup_status(mvm);
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
@@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
+static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table[0];
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid &&
+ err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ }
+ return err_info.valid;
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
@@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
+ if (iwl_mvm_check_rt_status(mvm, vif)) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ NULL, 0);
+ ret = 1;
+ goto err;
+ }
+
if (d0i3_first) {
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
if (ret < 0) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index c5df73231ba3..dade206d5511 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -364,7 +364,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
- mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+ /*
+ * Set a 'fake' TID for the command queue, since we use the
+ * hweight() of the tid_bitmap as a refcount now. Not that
+ * we ever even consider the command queue as one we might
+ * want to reuse, but be safe nevertheless.
+ */
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
+ BIT(IWL_MAX_TID_COUNT + 2);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 8f71eeed50d9..7ba5bc2ed1c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -512,6 +512,7 @@ enum iwl_mvm_scan_type {
IWL_SCAN_TYPE_WILD,
IWL_SCAN_TYPE_MILD,
IWL_SCAN_TYPE_FRAGMENTED,
+ IWL_SCAN_TYPE_FAST_BALANCE,
};
enum iwl_mvm_sched_scan_pass_all_states {
@@ -753,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
* This is a state in which a single queue serves more than one TID, all of
* which are not aggregated. Note that the queue is only associated to one
* RA.
- * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
- * This is a state of a queue that has had traffic on it, but during the
- * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
- * it. In this state, when a new queue is needed to be allocated but no
- * such free queue exists, an inactive queue might be freed and given to
- * the new RA/TID.
- * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
- * This is the state of a queue that has had traffic pass through it, but
- * needs to be reconfigured for some reason, e.g. the queue needs to
- * become unshared and aggregations re-enabled on.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
- IWL_MVM_QUEUE_INACTIVE,
- IWL_MVM_QUEUE_RECONFIGURING,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
@@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
+struct iwl_mvm_dqa_txq_info {
+ u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+ bool reserved; /* Is this the TXQ reserved for a STA */
+ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+ u8 txq_tid; /* The TID "owner" of this queue */
+ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ /* Timestamp for inactivation per TID of this queue */
+ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+ enum iwl_mvm_queue_status status;
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -843,17 +843,7 @@ struct iwl_mvm {
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
- struct {
- u8 hw_queue_refcount;
- u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
- bool reserved; /* Is this the TXQ reserved for a STA */
- u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
- u8 txq_tid; /* The TID "owner" of this queue*/
- u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
- /* Timestamp for inactivation per TID of this queue */
- unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
- enum iwl_mvm_queue_status status;
- } queue_info[IWL_MAX_HW_QUEUES];
+ struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
@@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
mvmvif->low_latency &= ~cause;
}
-/* hw scheduler queue config */
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout);
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
- u8 sta_id, u8 tid, unsigned int timeout);
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
-
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
*/
@@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
+ lockdep_assert_held(&mvm->mutex);
+ /* calling this function without using dump_start/end since at this
+ * point we already hold the op mode mutex
+ */
+ iwl_fw_dbg_collect_sync(&mvm->fwrt);
iwl_fw_cancel_timestamp(&mvm->fwrt);
iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
-
#define MVM_TCM_PERIOD_MSEC 500
#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
#define MVM_LL_PERIOD (10 * HZ)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 2c75f51a04e4..089972280daa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+ if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+ &tx_resp_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
#ifdef CONFIG_MAC80211_DEBUGFS
/* Disable last tx check if we are debugging with fixed rate but
@@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
table = &lq_sta->lq;
lq_hwrate = le32_to_cpu(table->rs_table[0]);
- rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
/* Here we actually compare this rate to the latest LQ command */
if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
@@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
lq_hwrate = le32_to_cpu(table->rs_table[i]);
- rs_rate_from_ucode_rate(lq_hwrate, info->band,
- &lq_rate);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+ &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
@@ -3260,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
for (i = 0; i < num_rates; i++)
lq_cmd->rs_table[i] = ucode_rate_le32;
- rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+ if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
if (is_mimo(&rate))
lq_cmd->mimo_delim = num_rates - 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index ffcd0ca86041..cfb784fea77b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
.suspend_time = 95,
.max_out_time = 44,
},
+ [IWL_SCAN_TYPE_FAST_BALANCE] = {
+ .suspend_time = 30,
+ .max_out_time = 37,
+ },
};
struct iwl_mvm_scan_params {
@@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
+struct iwl_is_dcm_with_go_iterator_data {
+ struct ieee80211_vif *current_vif;
+ bool is_dcm_with_p2p_go;
+};
+
+static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_is_dcm_with_go_iterator_data *data = _data;
+ struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif *curr_mvmvif =
+ iwl_mvm_vif_from_mac80211(data->current_vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+ other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+ data->is_dcm_with_p2p_go = true;
+}
+
static enum
-iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
@@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
if (!global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
- if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
- fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
- return IWL_SCAN_TYPE_FRAGMENTED;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+ if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+ (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return IWL_SCAN_TYPE_FRAGMENTED;
+
+ /* In case of DCM with a P2P GO where the BSS DTIM interval is
+ * below 220 msec, set all scan requests to fast-balance scan.
+ */
+ if (vif && vif->type == NL80211_IFTYPE_STATION &&
+ vif->bss_conf.dtim_period < 220) {
+ struct iwl_is_dcm_with_go_iterator_data data = {
+ .current_vif = vif,
+ .is_dcm_with_p2p_go = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_is_dcm_with_go_iterator,
+ &data);
+ if (data.is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
+ }
+ }
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
return IWL_SCAN_TYPE_MILD;
@@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
}
static enum
-iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
enum iwl_mvm_traffic_load load;
bool low_latency;
@@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
load = iwl_mvm_get_traffic_load(mvm);
low_latency = iwl_mvm_low_latency(mvm);
- return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
- bool p2p_device,
+ struct ieee80211_vif *vif,
enum nl80211_band band)
{
enum iwl_mvm_traffic_load load;
@@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
load = iwl_mvm_get_traffic_load_band(mvm, band);
low_latency = iwl_mvm_low_latency_band(mvm, band);
- return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static int
@@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
params->scan_plans[0].iterations == 1;
}
+static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
+{
+ return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+ type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- params->type != IWL_SCAN_TYPE_FRAGMENTED)
+ !iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
return flags;
@@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
- enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
@@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
if (iwl_mvm_is_cdb_supported(mvm)) {
enum iwl_mvm_scan_type lb_type, hb_type;
- lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
- hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
@@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cpu_to_le32(scan_timing[hb_type].suspend_time);
} else {
enum iwl_mvm_scan_type type =
- iwl_mvm_get_scan_type(mvm, false);
+ iwl_mvm_get_scan_type(mvm, NULL);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].max_out_time);
@@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return -ENOBUFS;
if (iwl_mvm_is_cdb_supported(mvm)) {
- type = iwl_mvm_get_scan_type_band(mvm, false,
+ type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
- hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
return 0;
} else {
- type = iwl_mvm_get_scan_type(mvm, false);
+ type = iwl_mvm_get_scan_type(mvm, NULL);
if (type == mvm->scan_type)
return 0;
}
@@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
SCAN_CONFIG_N_CHANNELS(num_channels) |
- (type == IWL_SCAN_TYPE_FRAGMENTED ?
+ (iwl_mvm_is_scan_fragmented(type) ?
SCAN_CONFIG_FLAG_SET_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
@@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
*/
if (iwl_mvm_cdb_scan_api(mvm)) {
if (iwl_mvm_is_cdb_supported(mvm))
- flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+ flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
@@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm) &&
- params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+ iwl_mvm_is_scan_fragmented(params->hb_type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
*/
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- params->type != IWL_SCAN_TYPE_FRAGMENTED &&
+ !iwl_mvm_is_scan_fragmented(params->type) &&
!iwl_mvm_is_adaptive_dwell_supported(mvm) &&
!iwl_mvm_is_oce_supported(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
@@ -1589,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
- bool p2p)
+ struct ieee80211_vif *vif)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->type =
- iwl_mvm_get_scan_type_band(mvm, p2p,
+ iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_2GHZ);
params->hb_type =
- iwl_mvm_get_scan_type_band(mvm, p2p,
+ iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_5GHZ);
} else {
- params->type = iwl_mvm_get_scan_type(mvm, p2p);
+ params->type = iwl_mvm_get_scan_type(mvm, vif);
}
}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1649,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_plans = &scan_plan;
params.n_scan_plans = 1;
- iwl_mvm_fill_scan_type(mvm, &params,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
if (ret < 0)
@@ -1745,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_scan_plans = req->n_scan_plans;
params.scan_plans = req->scan_plans;
- iwl_mvm_fill_scan_type(mvm, &params,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 8f929c774e70..1887d2b9f185 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -358,6 +358,108 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
return ret;
}
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 tid, u8 flags)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_DISABLE_QUEUE,
+ };
+ bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+ int ret;
+
+ if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+ return -EINVAL;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_free(mvm->trans, queue);
+
+ return 0;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ /*
+ * If there is another TID with the same AC - don't remove the MAC queue
+ * from the mapping
+ */
+ if (tid < IWL_MAX_TID_COUNT) {
+ unsigned long tid_bitmap =
+ mvm->queue_info[queue].tid_bitmap;
+ int ac = tid_to_mac80211_ac[tid];
+ int i;
+
+ for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+ if (tid_to_mac80211_ac[i] == ac)
+ remove_mac_queue = false;
+ }
+ }
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+
+ cmd.action = mvm->queue_info[queue].tid_bitmap ?
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ queue,
+ mvm->queue_info[queue].tid_bitmap,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ /* If the queue is still enabled - nothing left to do in this func */
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tid = mvm->queue_info[queue].txq_tid;
+
+ /* Make sure queue info is correct even though we overwrite it */
+ WARN(mvm->queue_info[queue].tid_bitmap ||
+ mvm->hw_queue_to_mac80211[queue],
+ "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
+ queue, mvm->hw_queue_to_mac80211[queue],
+ mvm->queue_info[queue].tid_bitmap);
+
+ /* If we are here - the queue is freed and we can zero out these vals */
+ mvm->queue_info[queue].tid_bitmap = 0;
+ mvm->hw_queue_to_mac80211[queue] = 0;
+
+ /* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
+ mvm->queue_info[queue].reserved = false;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
+ return ret;
+}
+
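With the per-queue refcount gone, the TID bitmap alone now decides whether a queue stays enabled. A minimal userspace sketch of that bookkeeping (names hypothetical, assumes tid < 16):

#include <stdbool.h>
#include <stdint.h>

/* Clear one TID from a queue's bitmap; returns true when the last TID
 * is gone and the hardware queue should actually be disabled/freed. */
static bool queue_remove_tid(uint16_t *tid_bitmap, unsigned int tid)
{
	*tid_bitmap &= (uint16_t)~(1u << tid);
	return *tid_bitmap == 0;
}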
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
struct ieee80211_sta *sta;
@@ -447,11 +549,12 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
- bool same_sta)
+ u8 new_sta_id)
{
struct iwl_mvm_sta *mvmsta;
u8 txq_curr_ac, sta_id, tid;
unsigned long disable_agg_tids = 0;
+ bool same_sta;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -465,6 +568,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
+ same_sta = sta_id == new_sta_id;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (WARN_ON(!mvmsta))
return -EINVAL;
@@ -479,10 +584,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
mvmsta->vif->hw_queue[txq_curr_ac],
tid, 0);
if (ret) {
- /* Re-mark the inactive queue as inactive */
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
queue, ret);
@@ -504,7 +605,13 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
u8 ac_to_queue[IEEE80211_NUM_ACS];
int i;
+ /*
+ * This protects us against grabbing a queue that's being reconfigured
+ * by the inactivity checker.
+ */
+ lockdep_assert_held(&mvm->mutex);
lockdep_assert_held(&mvm->queue_info_lock);
+
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
@@ -517,11 +624,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
continue;
- /* Don't try and take queues being reconfigured */
- if (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_RECONFIGURING)
- continue;
-
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
}
@@ -562,14 +664,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
return -ENOSPC;
}
- /* Make sure the queue isn't in the middle of being reconfigured */
- if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
- IWL_ERR(mvm,
- "TXQ %d is in the middle of re-config - try again\n",
- queue);
- return -EBUSY;
- }
-
return queue;
}
@@ -579,9 +673,9 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true.
*/
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force)
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
@@ -616,7 +710,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->hw_queue_to_mac80211[queue];
- shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+ shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
@@ -674,6 +768,57 @@ out:
return ret;
}
+static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
+ u8 minq, u8 maxq)
+{
+ int i;
+
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ /* This should not be hit with new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -ENOSPC;
+
+ /* Start by looking for a free queue */
+ for (i = minq; i <= maxq; i++)
+ if (mvm->queue_info[i].tid_bitmap == 0 &&
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+ return i;
+
+ return -ENOSPC;
+}
+
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout)
+{
+ int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+
+ if (tid == IWL_MAX_TID_COUNT) {
+ tid = IWL_MGMT_TID;
+ size = IWL_MGMT_QUEUE_SIZE;
+ }
+ queue = iwl_trans_txq_alloc(mvm->trans,
+ cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ sta_id, tid, SCD_QUEUE_CFG, size, timeout);
+
+ if (queue < 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+ sta_id, tid, queue);
+ return queue;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+ queue, sta_id, tid);
+
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+ queue, mvm->hw_queue_to_mac80211[queue]);
+
+ return queue;
+}
+
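On the TVQM path, management frames ride the IWL_MAX_TID_COUNT pseudo-TID, which gets remapped to the dedicated management TID with a much smaller queue before allocation. A hedged sketch of the remap (constants hypothetical):

#define MAX_TID		8	/* pseudo-TID carried by management frames */
#define MGMT_TID	15
#define DEFAULT_QSIZE	256
#define MGMT_QSIZE	16

static void mgmt_tid_remap(unsigned int *tid, int *size)
{
	*size = DEFAULT_QSIZE;
	if (*tid == MAX_TID) {		/* management traffic */
		*tid = MGMT_TID;
		*size = MGMT_QSIZE;	/* mgmt queues can be much smaller */
	}
}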
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac,
int tid)
@@ -698,12 +843,428 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
- mvmsta->tid_data[tid].is_tid_active = true;
spin_unlock_bh(&mvmsta->lock);
return 0;
}
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 sta_id, u8 tid)
+{
+ bool enable_queue = true;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /* Make sure this TID isn't already enabled */
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+ queue, tid);
+ return false;
+ }
+
+ /* Update mappings and refcounts */
+ if (mvm->queue_info[queue].tid_bitmap)
+ enable_queue = false;
+
+ if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+ WARN(mac80211_queue >=
+ BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+ "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+ mac80211_queue, queue, sta_id, tid);
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ }
+
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
+
+ if (enable_queue) {
+ if (tid != IWL_MAX_TID_COUNT)
+ mvm->queue_info[queue].mac80211_ac =
+ tid_to_mac80211_ac[tid];
+ else
+ mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+ mvm->queue_info[queue].txq_tid = tid;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ queue, mvm->queue_info[queue].tid_bitmap,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ return enable_queue;
+}
+
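Only the first TID mapped onto a queue should trigger the firmware enable command; subsequent TIDs merely extend the bitmap. A minimal sketch of that transition test (names hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Returns true on the 0 -> nonzero transition, i.e. the caller must send
 * the enable command; false if the queue was already up for another TID. */
static bool queue_map_tid(uint16_t *tid_bitmap, unsigned int tid)
{
	bool enable = (*tid_bitmap == 0);

	*tid_bitmap |= (uint16_t)(1u << tid);
	return enable;
}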
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Send the enabling command if we need to */
+ if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid))
+ return false;
+
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+ return inc_ssn;
+}
+
+static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_UPDATE_QUEUE_TID,
+ };
+ int tid;
+ unsigned long tid_bitmap;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+ return;
+
+ /* Find any TID for queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ cmd.tid = tid;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+ queue, ret);
+ return;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
+}
+
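When the owning TID leaves a shared queue, ownership must pass to any TID still mapped there, otherwise the old owner can never be given a queue of its own. A sketch of picking a new owner from the bitmap (GCC builtin used for brevity; name hypothetical):

/* Lowest remaining TID becomes the new owner; -1 if the bitmap is empty. */
static int queue_pick_owner_tid(unsigned long tid_bitmap)
{
	return tid_bitmap ? __builtin_ctzl(tid_bitmap) : -1;
}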
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid = -1;
+ unsigned long tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret = true;
+
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
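Once a queue is back down to a single TID, aggregation that was suspended for sharing can be re-enabled by clearing the TID's bit in the disable mask before notifying the firmware via ADD_STA. A one-line sketch (name hypothetical):

#include <stdint.h>

/* Allow aggregation on @tid again; the result feeds tid_disable_tx. */
static uint16_t agg_reenable(uint16_t tid_disable_mask, unsigned int tid)
{
	return tid_disable_mask & (uint16_t)~(1u << tid);
}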
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive
+ * If only some of the queue TIDs are inactive - unmap them from the queue
+ *
+ * Returns %true if all TIDs were removed and the queue could be reused.
+ */
+static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int queue,
+ unsigned long tid_bitmap,
+ unsigned long *unshare_queues,
+ unsigned long *changetid_queues)
+{
+ int tid;
+
+ lockdep_assert_held(&mvmsta->lock);
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ /* If some TFDs are still queued - don't mark TID as inactive */
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+
+ /* Don't mark as inactive any TID that has an active BA */
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+ tid_bitmap &= ~BIT(tid);
+ }
+
+ /* If all TIDs in the queue are inactive - return that it can be reused */
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
+ return true;
+ }
+
+ /*
+ * If we are here, this is a shared queue and not all TIDs timed out.
+ * Remove the ones that did.
+ */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+ u16 tid_bitmap;
+
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ /*
+ * We need to take into account a situation in which a TXQ was
+ * allocated to TID x, and then turned shared by adding TIDs y
+ * and z. If TID x becomes inactive and is removed from the TXQ,
+ * ownership must be given to one of the remaining TIDs.
+ * This is mainly because if TID x continues - a new queue can't
+ * be allocated for it as long as it is an owner of another TXQ.
+ *
+ * Mark this queue in the right bitmap, we'll send the command
+ * to the firmware later.
+ */
+ if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+ set_bit(queue, changetid_queues);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Removing inactive TID %d from shared Q:%d\n",
+ tid, queue);
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d left with tid bitmap 0x%x\n", queue,
+ mvm->queue_info[queue].tid_bitmap);
+
+ /*
+ * There may be different TIDs with the same mac queues, so make
+ * sure all TIDs have existing corresponding mac queues enabled
+ */
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ mvm->hw_queue_to_mac80211[queue] |=
+ BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+ }
+
+ /* If the queue is marked as shared - "unshare" it */
+ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+ queue);
+ set_bit(queue, unshare_queues);
+ }
+
+ return false;
+}
+
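A TID only counts as inactive if it has neither queued frames nor an active BA session, so the candidates are filtered before being compared against the queue's full bitmap. A hedged sketch of that filter (callback and bound are hypothetical):

#include <stdbool.h>

/* Drop from @candidates every TID that is still busy. */
static unsigned long filter_inactive_tids(unsigned long candidates,
					  unsigned int max_tid,
					  bool (*tid_busy)(unsigned int tid))
{
	unsigned int tid;

	for (tid = 0; tid <= max_tid; tid++)
		if ((candidates & (1ul << tid)) && tid_busy(tid))
			candidates &= ~(1ul << tid);
	return candidates;
}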
+/*
+ * Check for inactivity - this includes checking if any queue
+ * can be unshared and finding one (and only one) that can be
+ * reused.
+ * This function is also invoked as a sort of clean-up task,
+ * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
+ *
+ * Returns the queue number, or -ENOSPC.
+ */
+static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
+{
+ unsigned long now = jiffies;
+ unsigned long unshare_queues = 0;
+ unsigned long changetid_queues = 0;
+ int i, ret, free_queue = -ENOSPC;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -ENOSPC;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ /* we skip the CMD queue below by starting at 1 */
+ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
+
+ for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid;
+ unsigned long inactive_tid_bitmap = 0;
+ unsigned long queue_tid_bitmap;
+
+ queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+ if (!queue_tid_bitmap)
+ continue;
+
+ /* If TXQ isn't in active use anyway - nothing to do here... */
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
+ continue;
+
+ /* Check to see if there are inactive TIDs on this queue */
+ for_each_set_bit(tid, &queue_tid_bitmap,
+ IWL_MAX_TID_COUNT + 1) {
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ continue;
+
+ inactive_tid_bitmap |= BIT(tid);
+ }
+
+ /* If all TIDs are active - finish check on this queue */
+ if (!inactive_tid_bitmap)
+ continue;
+
+ /*
+ * If we are here - the queue hadn't been served recently and is
+ * in use
+ */
+
+ sta_id = mvm->queue_info[i].ra_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /*
+ * If the STA doesn't exist anymore, it isn't an error. It could
+ * be that it was removed since getting the queues, and in this
+ * case it should've inactivated its queues anyway.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* this isn't so nice, but works OK due to the way we loop */
+ spin_unlock(&mvm->queue_info_lock);
+
+ /* and we need this locking order */
+ spin_lock(&mvmsta->lock);
+ spin_lock(&mvm->queue_info_lock);
+ ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+ inactive_tid_bitmap,
+ &unshare_queues,
+ &changetid_queues);
+ if (ret >= 0 && free_queue < 0)
+ free_queue = ret;
+ /* only unlock sta lock - we still need the queue info lock */
+ spin_unlock(&mvmsta->lock);
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Reconfigure queues requiring reconfiguration */
+ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_unshare_queue(mvm, i);
+ for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_change_queue_tid(mvm, i);
+
+ if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
+ alloc_for_sta);
+ if (ret)
+ return ret;
+ }
+
+ return free_queue;
+}
+
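The checker walks queues under queue_info_lock, but the documented order is station lock first, so inside the loop it drops the queue lock, takes the station lock, and re-takes the queue lock. A pthread sketch of the same re-lock dance (lock names hypothetical):

#include <pthread.h>

static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;   /* "A" */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER; /* "B" */

/* Called with queue_lock held; respects the A-before-B ordering. */
static void relock_for_sta_work(void)
{
	pthread_mutex_unlock(&queue_lock);
	pthread_mutex_lock(&sta_lock);
	pthread_mutex_lock(&queue_lock);
	/* ... per-station queue surgery under both locks ... */
	pthread_mutex_unlock(&sta_lock);  /* keep queue_lock for the loop */
}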
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -719,7 +1280,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
- bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
bool shared_queue = false, inc_ssn;
@@ -756,9 +1316,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
(mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_RESERVED ||
- mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_INACTIVE)) {
+ IWL_MVM_QUEUE_RESERVED)) {
queue = mvmsta->reserved_queue;
mvm->queue_info[queue].reserved = true;
IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
@@ -768,21 +1326,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ spin_unlock_bh(&mvm->queue_info_lock);
- /*
- * Check if this queue is already allocated but inactive.
- * In such a case, we'll need to first free this queue before enabling
- * it again, so we'll mark it as reserved to make sure no new traffic
- * arrives on it
- */
- if (queue > 0 &&
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
- using_inactive_queue = true;
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
- IWL_DEBUG_TX_QUEUES(mvm,
- "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
- queue, mvmsta->sta_id, tid);
+ /* try harder - perhaps kill an inactive queue */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+
+ spin_lock_bh(&mvm->queue_info_lock);
}
/* No free queue - we'll have to share */
@@ -800,7 +1350,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
* This will allow avoiding re-acquiring the lock at the end of the
* configuration. On error we'll mark it back as free.
*/
- if ((queue > 0) && !shared_queue)
+ if (queue > 0 && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -821,16 +1371,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
- /*
- * If this queue was previously inactive (idle) - we need to free it
- * first
- */
- if (using_inactive_queue) {
- ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
- if (ret)
- return ret;
- }
-
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating %squeue #%d to sta %d on tid %d\n",
shared_queue ? "shared " : "", queue,
@@ -874,7 +1414,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (inc_ssn)
mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue;
- mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
queue_state = mvmsta->tid_data[tid].state;
@@ -909,129 +1448,6 @@ out_err:
return ret;
}
-static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_UPDATE_QUEUE_TID,
- };
- int tid;
- unsigned long tid_bitmap;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- spin_lock_bh(&mvm->queue_info_lock);
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
-
- if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
- return;
-
- /* Find any TID for queue */
- tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
- cmd.tid = tid;
- cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
-
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
- if (ret) {
- IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
- queue, ret);
- return;
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].txq_tid = tid;
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
- queue, tid);
-}
-
-static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
-{
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id;
- int tid = -1;
- unsigned long tid_bitmap;
- unsigned int wdg_timeout;
- int ssn;
- int ret = true;
-
- /* queue sharing is disabled on new TX path */
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- lockdep_assert_held(&mvm->mutex);
-
- spin_lock_bh(&mvm->queue_info_lock);
- sta_id = mvm->queue_info[queue].ra_sta_id;
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* Find TID for queue, and make sure it is the only one on the queue */
- tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
- if (tid_bitmap != BIT(tid)) {
- IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
- queue, tid_bitmap);
- return;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
- tid);
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
- wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
-
- ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
-
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
- tid_to_mac80211_ac[tid], ssn,
- wdg_timeout, true);
- if (ret) {
- IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
- return;
- }
-
- /* If aggs should be turned back on - do it */
- if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
- struct iwl_mvm_add_sta_cmd cmd = {0};
-
- mvmsta->tid_disable_agg &= ~BIT(tid);
-
- cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
- cmd.sta_id = mvmsta->sta_id;
- cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
- cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
- cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
- iwl_mvm_add_sta_cmd_size(mvm), &cmd);
- if (!ret) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "TXQ #%d is now aggregated again\n",
- queue);
-
- /* Mark queue intenally as aggregating again */
- iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
- }
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
- spin_unlock_bh(&mvm->queue_info_lock);
-}
-
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
if (tid == IWL_MAX_TID_COUNT)
@@ -1100,47 +1516,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long deferred_tid_traffic;
- int queue, sta_id, tid;
-
- /* Check inactivity of queues */
- iwl_mvm_inactivity_check(mvm);
+ int sta_id, tid;
mutex_lock(&mvm->mutex);
- /* No queue reconfiguration in TVQM mode */
- if (iwl_mvm_has_new_tx_api(mvm))
- goto alloc_queues;
-
- /* Reconfigure queues requiring reconfiguation */
- for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
- bool reconfig;
- bool change_owner;
-
- spin_lock_bh(&mvm->queue_info_lock);
- reconfig = (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_RECONFIGURING);
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
- /*
- * We need to take into account a situation in which a TXQ was
- * allocated to TID x, and then turned shared by adding TIDs y
- * and z. If TID x becomes inactive and is removed from the TXQ,
- * ownership must be given to one of the remaining TIDs.
- * This is mainly because if TID x continues - a new queue can't
- * be allocated for it as long as it is an owner of another TXQ.
- */
- change_owner = !(mvm->queue_info[queue].tid_bitmap &
- BIT(mvm->queue_info[queue].txq_tid)) &&
- (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_SHARED);
- spin_unlock_bh(&mvm->queue_info_lock);
-
- if (reconfig)
- iwl_mvm_unshare_queue(mvm, queue);
- else if (change_owner)
- iwl_mvm_change_queue_owner(mvm, queue);
- }
-
-alloc_queues:
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
@@ -1167,23 +1548,19 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
- bool using_inactive_queue = false, same_sta = false;
/* queue reserving is disabled on new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return 0;
- /*
- * Check for inactive queues, so we don't reach a situation where we
- * can't add a STA due to a shortage in queues that doesn't really exist
- */
- iwl_mvm_inactivity_check(mvm);
+ /* run the general cleanup/unsharing of queues */
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
- !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
(mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
IWL_MVM_QUEUE_FREE))
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
@@ -1193,16 +1570,13 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "No available queues for new station\n");
- return -ENOSPC;
- } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
- /*
- * If this queue is already allocated but inactive we'll need to
- * first free this queue before enabling it again, we'll mark
- * it as reserved to make sure no new traffic arrives on it
- */
- using_inactive_queue = true;
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+ /* try again - this time kick out a queue if needed */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+ if (queue < 0) {
+ IWL_ERR(mvm, "No available queues for new station\n");
+ return -ENOSPC;
+ }
+ spin_lock_bh(&mvm->queue_info_lock);
}
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
@@ -1210,9 +1584,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
mvmsta->reserved_queue = queue;
- if (using_inactive_queue)
- iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
-
IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
queue, mvmsta->sta_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 0fc211108149..de1a0a2d8723 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -312,9 +312,6 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
- * @is_tid_active: has this TID sent traffic in the last
- * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
- * field should be ignored.
* @tpt_meas_start: time of the throughput measurements start, is reset every HZ
* @tx_count_last: number of frames transmitted during the last second
* @tx_count: counts the number of frames transmitted since the last reset of
@@ -332,7 +329,6 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
- bool is_tid_active;
unsigned long tpt_meas_start;
u32 tx_count_last;
u32 tx_count;
@@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force);
-
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 99c64ea2619b..ec57682efe54 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1140,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
- if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
- !mvmsta->tid_data[tid].is_tid_active)) {
- /* If TXQ needs to be allocated... */
- if (txq_id == IWL_MVM_INVALID_QUEUE) {
- iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
+ iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
- /*
- * The frame is now deferred, and the worker scheduled
- * will re-allocate it, so we can free it for now.
- */
- iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
- spin_unlock(&mvmsta->lock);
- return 0;
- }
-
- /* queue should always be active in new TX path */
- WARN_ON(iwl_mvm_has_new_tx_api(mvm));
-
- /* If we are here - TXQ exists and needs to be re-activated */
- spin_lock(&mvm->queue_info_lock);
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
- mvmsta->tid_data[tid].is_tid_active = true;
- spin_unlock(&mvm->queue_info_lock);
-
- IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
- txq_id);
+ /*
+ * The frame is now deferred, and the worker scheduled
+ * will re-allocate it, so we can free it for now.
+ */
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return 0;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 6c14d3413bdc..818e1180bbdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -599,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
-{
- int i;
-
- lockdep_assert_held(&mvm->queue_info_lock);
-
- /* This should not be hit with new TX path */
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return -ENOSPC;
-
- /* Start by looking for a free queue */
- for (i = minq; i <= maxq; i++)
- if (mvm->queue_info[i].hw_queue_refcount == 0 &&
- mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
- return i;
-
- /*
- * If no free queue found - settle for an inactive one to reconfigure
- * Make sure that the inactive queue either already belongs to this STA,
- * or that if it belongs to another one - it isn't the reserved queue
- */
- for (i = minq; i <= maxq; i++)
- if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
- (sta_id == mvm->queue_info[i].ra_sta_id ||
- !mvm->queue_info[i].reserved))
- return i;
-
- return -ENOSPC;
-}
-
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
@@ -649,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
- if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+ if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -ENXIO;
@@ -665,229 +635,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u8 sta_id, u8 tid)
-{
- bool enable_queue = true;
-
- spin_lock_bh(&mvm->queue_info_lock);
-
- /* Make sure this TID isn't already enabled */
- if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
- queue, tid);
- return false;
- }
-
- /* Update mappings and refcounts */
- if (mvm->queue_info[queue].hw_queue_refcount > 0)
- enable_queue = false;
-
- if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
- WARN(mac80211_queue >=
- BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
- "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
- mac80211_queue, queue, sta_id, tid);
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- }
-
- mvm->queue_info[queue].hw_queue_refcount++;
- mvm->queue_info[queue].tid_bitmap |= BIT(tid);
- mvm->queue_info[queue].ra_sta_id = sta_id;
-
- if (enable_queue) {
- if (tid != IWL_MAX_TID_COUNT)
- mvm->queue_info[queue].mac80211_ac =
- tid_to_mac80211_ac[tid];
- else
- mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
-
- mvm->queue_info[queue].txq_tid = tid;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
- queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue]);
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- return enable_queue;
-}
-
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
- u8 sta_id, u8 tid, unsigned int timeout)
-{
- int queue, size = IWL_DEFAULT_QUEUE_SIZE;
-
- if (tid == IWL_MAX_TID_COUNT) {
- tid = IWL_MGMT_TID;
- size = IWL_MGMT_QUEUE_SIZE;
- }
- queue = iwl_trans_txq_alloc(mvm->trans,
- cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
- sta_id, tid, SCD_QUEUE_CFG, size, timeout);
-
- if (queue < 0) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
- sta_id, tid, queue);
- return queue;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
- queue, sta_id, tid);
-
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d (mac80211 map:0x%x)\n",
- queue, mvm->hw_queue_to_mac80211[queue]);
-
- return queue;
-}
-
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = cfg->frame_limit,
- .sta_id = cfg->sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = cfg->fifo,
- .aggregate = cfg->aggregate,
- .tid = cfg->tid,
- };
- bool inc_ssn;
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return false;
-
- /* Send the enabling command if we need to */
- if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid))
- return false;
-
- inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
- NULL, wdg_timeout);
- if (inc_ssn)
- le16_add_cpu(&cmd.ssn, 1);
-
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
- "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
-
- return inc_ssn;
-}
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_DISABLE_QUEUE,
- };
- bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
- int ret;
-
- if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
- return -EINVAL;
-
- if (iwl_mvm_has_new_tx_api(mvm)) {
- spin_lock_bh(&mvm->queue_info_lock);
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- iwl_trans_txq_free(mvm->trans, queue);
-
- return 0;
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
-
- if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
- spin_unlock_bh(&mvm->queue_info_lock);
- return 0;
- }
-
- mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
-
- /*
- * If there is another TID with the same AC - don't remove the MAC queue
- * from the mapping
- */
- if (tid < IWL_MAX_TID_COUNT) {
- unsigned long tid_bitmap =
- mvm->queue_info[queue].tid_bitmap;
- int ac = tid_to_mac80211_ac[tid];
- int i;
-
- for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
- if (tid_to_mac80211_ac[i] == ac)
- remove_mac_queue = false;
- }
- }
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
- mvm->queue_info[queue].hw_queue_refcount--;
-
- cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
- SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
- if (cmd.action == SCD_CFG_DISABLE_QUEUE)
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
- queue,
- mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue]);
-
- /* If the queue is still enabled - nothing left to do in this func */
- if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
- spin_unlock_bh(&mvm->queue_info_lock);
- return 0;
- }
-
- cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
- cmd.tid = mvm->queue_info[queue].txq_tid;
-
- /* Make sure queue info is correct even though we overwrite it */
- WARN(mvm->queue_info[queue].hw_queue_refcount ||
- mvm->queue_info[queue].tid_bitmap ||
- mvm->hw_queue_to_mac80211[queue],
- "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
- queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue],
- mvm->queue_info[queue].tid_bitmap);
-
- /* If we are here - the queue is freed and we can zero out these vals */
- mvm->queue_info[queue].hw_queue_refcount = 0;
- mvm->queue_info[queue].tid_bitmap = 0;
- mvm->hw_queue_to_mac80211[queue] = 0;
-
- /* Regardless if this is a reserved TXQ for a STA - mark it as false */
- mvm->queue_info[queue].reserved = false;
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
- sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
-
- if (ret)
- IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
- queue, ret);
- return ret;
-}
-
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @sync: This command can be sent synchronously.
@@ -1255,171 +1002,6 @@ out:
ieee80211_connection_loss(vif);
}
-/*
- * Remove inactive TIDs of a given queue.
- * If all queue TIDs are inactive - mark the queue as inactive
- * If only some the queue TIDs are inactive - unmap them from the queue
- */
-static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvmsta, int queue,
- unsigned long tid_bitmap)
-{
- int tid;
-
- lockdep_assert_held(&mvmsta->lock);
- lockdep_assert_held(&mvm->queue_info_lock);
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- /* If some TFDs are still queued - don't mark TID as inactive */
- if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
- tid_bitmap &= ~BIT(tid);
-
- /* Don't mark as inactive any TID that has an active BA */
- if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
- tid_bitmap &= ~BIT(tid);
- }
-
- /* If all TIDs in the queue are inactive - mark queue as inactive. */
- if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
-
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
- mvmsta->tid_data[tid].is_tid_active = false;
-
- IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
- queue);
- return;
- }
-
- /*
- * If we are here, this is a shared queue and not all TIDs timed-out.
- * Remove the ones that did.
- */
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
-
- mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
- mvm->queue_info[queue].hw_queue_refcount--;
- mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- mvmsta->tid_data[tid].is_tid_active = false;
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Removing inactive TID %d from shared Q:%d\n",
- tid, queue);
- }
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "TXQ #%d left with tid bitmap 0x%x\n", queue,
- mvm->queue_info[queue].tid_bitmap);
-
- /*
- * There may be different TIDs with the same mac queues, so make
- * sure all TIDs have existing corresponding mac queues enabled
- */
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- mvm->hw_queue_to_mac80211[queue] |=
- BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
- }
-
- /* If the queue is marked as shared - "unshare" it */
- if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
- IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
- queue);
- }
-}
-
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
-{
- unsigned long timeout_queues_map = 0;
- unsigned long now = jiffies;
- int i;
-
- if (iwl_mvm_has_new_tx_api(mvm))
- return;
-
- spin_lock_bh(&mvm->queue_info_lock);
- for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
- if (mvm->queue_info[i].hw_queue_refcount > 0)
- timeout_queues_map |= BIT(i);
- spin_unlock_bh(&mvm->queue_info_lock);
-
- rcu_read_lock();
-
- /*
- * If a queue time outs - mark it as INACTIVE (don't remove right away
- * if we don't have to.) This is an optimization in case traffic comes
- * later, and we don't HAVE to use a currently-inactive queue
- */
- for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id;
- int tid;
- unsigned long inactive_tid_bitmap = 0;
- unsigned long queue_tid_bitmap;
-
- spin_lock_bh(&mvm->queue_info_lock);
- queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
-
- /* If TXQ isn't in active use anyway - nothing to do here... */
- if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
- mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
- spin_unlock_bh(&mvm->queue_info_lock);
- continue;
- }
-
- /* Check to see if there are inactive TIDs on this queue */
- for_each_set_bit(tid, &queue_tid_bitmap,
- IWL_MAX_TID_COUNT + 1) {
- if (time_after(mvm->queue_info[i].last_frame_time[tid] +
- IWL_MVM_DQA_QUEUE_TIMEOUT, now))
- continue;
-
- inactive_tid_bitmap |= BIT(tid);
- }
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* If all TIDs are active - finish check on this queue */
- if (!inactive_tid_bitmap)
- continue;
-
- /*
- * If we are here - the queue hadn't been served recently and is
- * in use
- */
-
- sta_id = mvm->queue_info[i].ra_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-
- /*
- * If the STA doesn't exist anymore, it isn't an error. It could
- * be that it was removed since getting the queues, and in this
- * case it should've inactivated its queues anyway.
- */
- if (IS_ERR_OR_NULL(sta))
- continue;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- spin_lock_bh(&mvmsta->lock);
- spin_lock(&mvm->queue_info_lock);
- iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
- inactive_tid_bitmap);
- spin_unlock(&mvm->queue_info_lock);
- spin_unlock_bh(&mvmsta->lock);
- }
-
- rcu_read_unlock();
-}
-
void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index b71cf55480fc..e880f69eac26 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
- tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+ tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -438,6 +438,9 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
return -ENOMEM;
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
if (tb_idx < 0)
return tb_idx;
@@ -454,7 +457,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
- int tx_cmd_len)
+ int tx_cmd_len,
+ bool pad)
{
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
@@ -478,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
IWL_FIRST_TB_SIZE;
- tb1_len = ALIGN(len, 4);
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -486,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@@ -496,15 +505,14 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb->data + hdr_len,
+ tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
-
return tfd;
out_err:
@@ -551,7 +559,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
out_meta, hdr_len, len);
return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len);
+ hdr_len, len, !amsdu);
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
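TB1 used to be unconditionally padded to a 4-byte boundary; the new pad flag (passed as !amsdu) skips that for A-MSDU frames, plausibly because their subframe length accounting is sensitive to padding, though the patch does not say so. A one-line sketch of the conditional alignment:

#include <stdbool.h>

/* ALIGN(len, 4) when padding is requested, exact length otherwise. */
static unsigned int tb1_len(unsigned int len, bool pad)
{
	return pad ? (len + 3u) & ~3u : len;
}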
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index f227b91098c9..87b7225fe289 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1994,6 +1994,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
head_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb->data + hdr_len,
+ head_tb_len);
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
@@ -2011,6 +2014,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
if (tb_idx < 0)
@@ -2190,8 +2196,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
- hdr_tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -2216,8 +2222,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
- size);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+ size);
data_left -= size;
tso_build_data(skb, &tso, size);
@@ -2398,6 +2404,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ iwl_pcie_get_tfd(trans, txq,
+ txq->write_ptr),
+ trans_pcie->tfd_size,
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+ hdr_len);
+
/*
* If gso_size wasn't set, don't give the frame "amsdu treatment"
* (adding subframes, etc.).
@@ -2421,14 +2434,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
out_meta)))
goto out_err;
}
-
- trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans, txq,
- txq->write_ptr),
- trans_pcie->tfd_size,
- &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
- hdr_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
}
/* building the A-MSDU might have changed this data, so memcpy it now */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6f2730c7229b..aa8058264d5b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -495,7 +495,6 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
-static struct workqueue_struct *hwsim_wq;
static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
static int hwsim_radios_generation = 1;
@@ -3692,13 +3691,9 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
- hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
- if (!hwsim_wq)
- return -ENOMEM;
-
err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
if (err)
- goto out_free_wq;
+ return err;
err = register_pernet_device(&hwsim_net_ops);
if (err)
@@ -3829,8 +3824,6 @@ out_unregister_pernet:
unregister_pernet_device(&hwsim_net_ops);
out_free_rht:
rhashtable_destroy(&hwsim_radios_rht);
-out_free_wq:
- destroy_workqueue(hwsim_wq);
return err;
}
module_init(init_mac80211_hwsim);
@@ -3842,12 +3835,10 @@ static void __exit exit_mac80211_hwsim(void)
hwsim_exit_netlink();
mac80211_hwsim_free();
- flush_workqueue(hwsim_wq);
rhashtable_destroy(&hwsim_radios_rht);
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
unregister_pernet_device(&hwsim_net_ops);
- destroy_workqueue(hwsim_wq);
}
module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/wireless/marvell/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c
index 7d88223f890b..cebf03c6a622 100644
--- a/drivers/net/wireless/marvell/libertas/if_cs.c
+++ b/drivers/net/wireless/marvell/libertas/if_cs.c
@@ -900,8 +900,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Make this card known to the libertas driver */
priv = lbs_add_card(card, &p_dev->dev);
- if (!priv) {
- ret = -ENOMEM;
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
goto out2;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 43743c26c071..8d98e7fdd27c 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1206,8 +1206,8 @@ static int if_sdio_probe(struct sdio_func *func,
priv = lbs_add_card(card, &func->dev);
- if (!priv) {
- ret = -ENOMEM;
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
goto free;
}
@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
if (priv->fw_ready) {
+ ret = lbs_suspend(priv);
+ if (ret)
+ return ret;
+
priv->power_up_on_resume = true;
if_sdio_power_off(card);
}
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index e9aec6cb1105..504d6e096476 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1146,8 +1146,8 @@ static int if_spi_probe(struct spi_device *spi)
* This will call alloc_etherdev.
*/
priv = lbs_add_card(card, &spi->dev);
- if (!priv) {
- err = -ENOMEM;
+ if (IS_ERR(priv)) {
+ err = PTR_ERR(priv);
goto free_card;
}
card->priv = priv;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index c67a8e7be310..220dcdee8d2b 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -254,8 +254,11 @@ static int if_usb_probe(struct usb_interface *intf,
goto dealloc;
}
- if (!(priv = lbs_add_card(cardp, &intf->dev)))
+ priv = lbs_add_card(cardp, &intf->dev);
+ if (IS_ERR(priv)) {
+ r = PTR_ERR(priv);
goto err_add_card;
+ }
cardp->priv = priv;
@@ -456,8 +459,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
cardp);
- cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
-
lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index f22e1c220cba..f7db60bc7c7f 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -907,25 +907,29 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
struct net_device *dev;
struct wireless_dev *wdev;
struct lbs_private *priv = NULL;
+ int err;
/* Allocate an Ethernet device and register it */
wdev = lbs_cfg_alloc(dmdev);
if (IS_ERR(wdev)) {
+ err = PTR_ERR(wdev);
pr_err("cfg80211 init failed\n");
- goto done;
+ goto err_cfg;
}
wdev->iftype = NL80211_IFTYPE_STATION;
priv = wdev_priv(wdev);
priv->wdev = wdev;
- if (lbs_init_adapter(priv)) {
+ err = lbs_init_adapter(priv);
+ if (err) {
pr_err("failed to initialize adapter structure\n");
goto err_wdev;
}
dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
if (!dev) {
+ err = -ENOMEM;
dev_err(dmdev, "no memory for network device instance\n");
goto err_adapter;
}
@@ -949,6 +953,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
init_waitqueue_head(&priv->waitq);
priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
if (IS_ERR(priv->main_thread)) {
+ err = PTR_ERR(priv->main_thread);
lbs_deb_thread("Error creating main thread.\n");
goto err_ndev;
}
@@ -961,7 +966,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
priv->wol_gap = 20;
priv->ehs_remove_supported = true;
- goto done;
+ return priv;
err_ndev:
free_netdev(dev);
@@ -972,10 +977,8 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
err_wdev:
lbs_cfg_free(priv);
- priv = NULL;
-
-done:
- return priv;
+ err_cfg:
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lbs_add_card);
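
With this change lbs_add_card() reports why it failed instead of collapsing every error to NULL, and each bus probe above (if_cs, if_sdio, if_spi, if_usb) forwards that code via IS_ERR()/PTR_ERR(). A minimal sketch of the error-pointer idiom from <linux/err.h> (struct thing and thing_create() are illustrative, not driver code):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct thing { int id; };

    /* Return a valid pointer on success, an errno encoded as a
     * pointer on failure. */
    static struct thing *thing_create(void)
    {
            struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

            if (!t)
                    return ERR_PTR(-ENOMEM);
            return t;
    }

    static int thing_probe(void)
    {
            struct thing *t = thing_create();

            if (IS_ERR(t))
                    return PTR_ERR(t);   /* recover the errno */
            /* ... use t ... */
            kfree(t);
            return 0;
    }
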
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 30a5d928e655..1d6bbce76041 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -79,6 +79,7 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
.copy = mt76_mmio_copy,
.wr_rp = mt76_mmio_wr_rp,
.rd_rp = mt76_mmio_rd_rp,
+ .type = MT76_BUS_MMIO,
};
dev->bus = &mt76_mmio_ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index f723a07cab29..3bfa7f5e3513 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -38,6 +38,11 @@ struct mt76_reg_pair {
u32 value;
};
+enum mt76_bus_type {
+ MT76_BUS_MMIO,
+ MT76_BUS_USB,
+};
+
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -48,8 +53,12 @@ struct mt76_bus_ops {
const struct mt76_reg_pair *rp, int len);
int (*rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
+ enum mt76_bus_type type;
};
+#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
+#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
+
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
MT_TXQ_VI = IEEE80211_AC_VI,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
deleted file mode 100644
index 891ce1c3461f..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76X0U_DMA_H
-#define __MT76X0U_DMA_H
-
-#include <asm/unaligned.h>
-#include <linux/skbuff.h>
-
-#define MT_DMA_HDR_LEN 4
-#define MT_RX_INFO_LEN 4
-#define MT_FCE_INFO_LEN 4
-#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
-
-/* Common Tx DMA descriptor fields */
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-/* Tx DMA MCU command specific flags */
-#define MT_TXD_CMD_SEQ GENMASK(19, 16)
-#define MT_TXD_CMD_TYPE GENMASK(26, 20)
-
-enum mt76_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
-};
-
-enum mt76_info_type {
- DMA_PACKET,
- DMA_COMMAND,
-};
-
-/* Tx DMA packet specific flags */
-#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
-#define MT_TXD_PKT_INFO_80211 BIT(19)
-#define MT_TXD_PKT_INFO_TSO BIT(20)
-#define MT_TXD_PKT_INFO_CSO BIT(21)
-#define MT_TXD_PKT_INFO_WIV BIT(24)
-#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
-
-enum mt76_qsel {
- MT_QSEL_MGMT,
- MT_QSEL_HCCA,
- MT_QSEL_EDCA,
- MT_QSEL_EDCA_2,
-};
-
-
-static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
- enum mt76_msg_port d_port,
- enum mt76_info_type type, u32 flags)
-{
- u32 info;
-
- /* Buffer layout:
- * | 4B | xfer len | pad | 4B |
- * | TXINFO | pkt/cmd | zero pad to 4B | zero |
- *
- * length field of TXINFO should be set to 'xfer len'.
- */
-
- info = flags |
- FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
- FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
- FIELD_PREP(MT_TXD_INFO_TYPE, type);
-
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- return skb_put_padto(skb, round_up(skb->len, 4) + 4);
-}
-
-static inline int
-mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
-{
- flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
- return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
-}
-
-/* Common Rx DMA descriptor fields */
-#define MT_RXD_INFO_LEN GENMASK(13, 0)
-#define MT_RXD_INFO_PCIE_INTR BIT(24)
-#define MT_RXD_INFO_QSEL GENMASK(26, 25)
-#define MT_RXD_INFO_PORT GENMASK(29, 27)
-#define MT_RXD_INFO_TYPE GENMASK(31, 30)
-
-/* Rx DMA packet specific flags */
-#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
-#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
-#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
-#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
-#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
-#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
-
-/* Rx DMA MCU command specific flags */
-#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
-#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
-
-enum mt76_evt_type {
- CMD_DONE,
- CMD_ERROR,
- CMD_RETRY,
- EVENT_PWR_RSP,
- EVENT_WOW_RSP,
- EVENT_CARRIER_DETECT_RSP,
- EVENT_DFS_DETECT_RSP,
-};
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 5735038c0e2d..ab4fd6e0f23a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -31,8 +31,8 @@ mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
int ret, i;
u32 start = 0, end = 0, cnt_free;
- ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START,
- data, sizeof(data), MT_EE_PHYSICAL_READ);
+ ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
+ sizeof(data), MT_EE_PHYSICAL_READ);
if (ret)
return ret;
@@ -55,10 +55,10 @@ mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
{
- u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
- u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
+ u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+ mt76x02_eeprom_parse_hw_cap(dev);
dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
@@ -86,7 +86,7 @@ static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
{
u8 val;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8;
if (mt76x02_field_valid(val))
dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
else
@@ -98,12 +98,12 @@ static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
u8 val;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET);
+ val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET);
if (!mt76x02_field_valid(val))
val = 0;
caldata->freq_offset = val;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8;
if (!mt76x02_field_valid(val))
val = 0;
@@ -118,10 +118,8 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
u16 rssi_offset;
int i;
- mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset,
- &lna_2g, lna_5g);
- caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g,
- lna_5g, chan);
+ mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g);
+ caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
val = rssi_offset >> (8 * i);
@@ -132,12 +130,12 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
}
}
-static s8 mt76x0_get_delta(struct mt76_dev *dev)
+static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->chandef;
+ struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
u8 val;
- if (mt76x02_tssi_enabled(dev))
+ if (mt76x0_tssi_enabled(dev))
return 0;
if (chandef->width == NL80211_CHAN_WIDTH_80) {
@@ -162,54 +160,54 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
struct mt76_rate_power *t = &dev->mt76.rate_power;
- s8 delta = mt76x0_get_delta(&dev->mt76);
+ s8 delta = mt76x0_get_delta(dev);
u16 val, addr;
memset(t, 0, sizeof(*t));
/* cck 1M, 2M, 5.5M, 11M */
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE);
t->cck[0] = t->cck[1] = s6_to_s8(val);
t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
/* ofdm 6M, 9M, 12M, 18M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
/* ofdm 24M, 36M, 48M, 54M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->stbc[0] = t->stbc[1] = s6_to_s8(val);
t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
- val = mt76x02_eeprom_get(&dev->mt76, addr);
+ val = mt76x02_eeprom_get(dev, addr);
t->stbc[4] = t->stbc[5] = s6_to_s8(val);
t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
/* vht mcs 8, 9 5GHz */
- val = mt76x02_eeprom_get(&dev->mt76, 0x132);
+ val = mt76x02_eeprom_get(dev, 0x132);
t->vht[7] = s6_to_s8(val);
t->vht[8] = s6_to_s8(val >> 8);
@@ -266,7 +264,7 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
}
- data = mt76x02_eeprom_get(&dev->mt76, addr);
+ data = mt76x02_eeprom_get(dev, addr);
info[0] = data;
if (!info[0] || info[0] > 0x3f)
@@ -312,7 +310,7 @@ static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
if (found < 0)
return found;
- return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data,
+ return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data,
MT76X0_EEPROM_SIZE, MT_EE_READ);
}
@@ -326,7 +324,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
if (err < 0)
return err;
- data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION);
+ data = mt76x02_eeprom_get(dev, MT_EE_VERSION);
version = data >> 8;
fae = data;
@@ -337,8 +335,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
version, fae);
- mt76x02_mac_setaddr(&dev->mt76,
- dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index 40fd4e61769b..ee9ade9f3c8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -37,4 +37,10 @@ static inline s8 s6_to_s8(u32 val)
return ret;
}
+static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev)
+{
+ return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index ee2b8e885608..4a9408801260 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -138,7 +138,7 @@ static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
RANDOM_WRITE(dev, common_mac_reg_table);
- mt76x02_set_beacon_offsets(&dev->mt76);
+ mt76x02_set_beacon_offsets(dev);
/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
RANDOM_WRITE(dev, mt76x0_mac_reg_table);
@@ -280,7 +280,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
return -ETIMEDOUT;
mt76x0_reset_csr_bbp(dev);
- ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
+ ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
if (ret)
return ret;
@@ -368,7 +368,10 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
- hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2;
+ hw->extra_tx_headroom = 2;
+ if (mt76_is_usb(dev))
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index c9cd0254a979..9273d2d2764a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -16,6 +16,20 @@
#include <linux/etherdevice.h>
#include "mt76x0.h"
+static int
+mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+
+ mt76_set_channel(&dev->mt76);
+ ret = mt76x0_phy_set_channel(dev, chandef);
+ mt76_txq_schedule_all(&dev->mt76);
+
+ return ret;
+}
+
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
@@ -25,7 +39,7 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
- ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+ ret = mt76x0_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
@@ -114,8 +128,6 @@ void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76x02_dev *dev = hw->priv;
- cancel_delayed_work_sync(&dev->cal_work);
- mt76x0_agc_save(dev);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
@@ -125,11 +137,7 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
{
struct mt76x02_dev *dev = hw->priv;
- mt76x0_agc_restore(dev);
clear_bit(MT76_SCANNING, &dev->mt76.state);
-
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
- MT_CALIBRATE_INTERVAL);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
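
mt76x0_set_channel() pins down the ordering a retune needs: calibration work is cancelled synchronously before the PHY is reprogrammed, and queued tx is kicked afterwards, while mt76x0_config() keeps the mac80211 queues stopped across the whole switch. The full sequence, schematically (names as in the hunks above, error handling elided):

    /* Channel-switch ordering as wired up above (sketch only). */
    static int example_switch_channel(struct ieee80211_hw *hw,
                                      struct mt76x02_dev *dev,
                                      struct cfg80211_chan_def *chandef)
    {
            int ret;

            ieee80211_stop_queues(hw);                  /* quiesce mac80211 */
            cancel_delayed_work_sync(&dev->cal_work);   /* no cal mid-retune */
            mt76_set_channel(&dev->mt76);               /* core bookkeeping */
            ret = mt76x0_phy_set_channel(dev, chandef); /* program the PHY */
            mt76_txq_schedule_all(&dev->mt76);          /* restart pending tx */
            ieee80211_wake_queues(hw);
            return ret;
    }
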
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
index b66e70f6cd89..3b34e1d2769f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -39,6 +39,9 @@ enum mcu_calibrate {
MCU_CAL_TXDCOC,
MCU_CAL_RX_GROUP_DELAY,
MCU_CAL_TX_GROUP_DELAY,
+ MCU_CAL_VCO,
+ MCU_CAL_NO_SIGNAL = 0xfe,
+ MCU_CAL_FULL = 0xff,
};
int mt76x0e_mcu_init(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 1bff2be45a13..2187bafaf2e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -66,12 +66,11 @@ int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
/* PHY */
void mt76x0_phy_init(struct mt76x02_dev *dev);
int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
-void mt76x0_agc_save(struct mt76x02_dev *dev);
-void mt76x0_agc_restore(struct mt76x02_dev *dev);
int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
/* MAC */
void mt76x0_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 87997cddf0d6..522c86059bcb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -28,6 +28,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
mutex_lock(&dev->mt76.mutex);
mt76x02_mac_start(dev);
+ mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
@@ -71,10 +72,19 @@ static const struct ieee80211_ops mt76x0e_ops = {
.tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
- .config = mt76x0_config,
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
+ .config = mt76x0_config,
.configure_filter = mt76x02_configure_filter,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76x02_ampdu_action,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .wake_tx_queue = mt76_wake_tx_queue,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
@@ -102,28 +112,34 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
u16 val;
mt76_clear(dev, MT_COEXCFG0, BIT(0));
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
- if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) {
- u32 data;
-
- /* set external external PA I/O
- * current to 16mA
- */
- data = mt76_rr(dev, 0x11c);
- val |= 0xc03;
- mt76_wr(dev, 0x11c, val);
- }
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
+ mt76_set(dev, MT_XO_CTRL7, 0xc03);
}
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ err = mt76x0_register_device(dev);
+ if (err < 0)
+ return err;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
return 0;
}
static int
mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
+ };
struct mt76x02_dev *dev;
int ret;
@@ -141,7 +157,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops);
+ dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
if (!dev)
return -ENOMEM;
@@ -150,6 +166,11 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+ ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
ret = mt76x0e_register_device(dev);
if (ret < 0)
goto error;
@@ -167,7 +188,7 @@ static void mt76x0e_cleanup(struct mt76x02_dev *dev)
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
mt76x02_dma_cleanup(dev);
- mt76x02_mcu_cleanup(&dev->mt76);
+ mt76x02_mcu_cleanup(dev);
}
static void
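
mt76x0e_probe() now requests its interrupt with devm_request_irq(), so the IRQ is released automatically when the device is unbound and neither the error path nor remove needs an explicit free_irq(). The general shape of the call (my_handler and priv are placeholders for the handler and its cookie):

    #include <linux/interrupt.h>

    /* Inside a PCI probe routine; device-managed, so the driver
     * core unwinds the request automatically. */
    err = devm_request_irq(&pdev->dev, pdev->irq, my_handler,
                           IRQF_SHARED, KBUILD_MODNAME, priv);
    if (err)
            return err;     /* nothing to free: devm handles it */
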
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
index 6c66656c21f4..569861289aa5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -116,6 +116,7 @@ static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
goto out;
}
+ mt76x02_set_ethtool_fwver(dev, hdr);
dev_dbg(dev->mt76.dev, "Firmware running!\n");
out:
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 4850a2db18d7..cf024950e0ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -14,6 +14,9 @@
* GNU General Public License for more details.
*/
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
#include "mt76x0.h"
#include "mcu.h"
#include "eeprom.h"
@@ -23,8 +26,6 @@
#include "initvals_phy.h"
#include "../mt76x02_phy.h"
-#include <linux/etherdevice.h>
-
static int
mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
{
@@ -37,7 +38,7 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
- if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
mutex_lock(&dev->phy_mutex);
@@ -76,7 +77,7 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
- if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
mutex_lock(&dev->phy_mutex);
@@ -111,15 +112,16 @@ out:
static int
rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
.value = val,
};
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state));
return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
- WARN_ON_ONCE(1);
return mt76x0_rf_csr_wr(dev, offset, val);
}
}
@@ -130,15 +132,16 @@ rf_rr(struct mt76x02_dev *dev, u32 offset)
int ret;
u32 val;
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
};
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state));
ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
- WARN_ON_ONCE(1);
ret = val = mt76x0_rf_csr_rr(dev, offset);
}
@@ -175,9 +178,22 @@ rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
}
#endif
-#define RF_RANDOM_WRITE(dev, tab) \
- mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \
- tab, ARRAY_SIZE(tab))
+static void
+mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data,
+ int n)
+{
+ while (n-- > 0) {
+ mt76x0_rf_csr_wr(dev, data->reg, data->value);
+ data++;
+ }
+}
+
+#define RF_RANDOM_WRITE(dev, tab) do { \
+ if (mt76_is_mmio(dev)) \
+ mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
+ else \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
+} while (0)
int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
{
@@ -186,7 +202,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
do {
val = mt76_rr(dev, MT_BBP(CORE, 0));
- printk("BBP version %08x\n", val);
if (val && ~val)
break;
} while (--i);
@@ -196,36 +211,10 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
return -EIO;
}
+ dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
return 0;
}
-static void
-mt76x0_bbp_set_ctrlch(struct mt76x02_dev *dev, enum nl80211_chan_width width,
- u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
{
u8 val;
@@ -283,13 +272,6 @@ static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
}
static void
-mt76x0_mac_set_ctrlch(struct mt76x02_dev *dev, bool primary_upper)
-{
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-
-static void
mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
{
switch (band) {
@@ -299,9 +281,6 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x45);
rf_wr(dev, MT_RF(6, 0), 0x44);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
break;
@@ -311,9 +290,6 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x44);
rf_wr(dev, MT_RF(6, 0), 0x45);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
break;
@@ -475,7 +451,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
mt76_wr(dev, MT_RF_MISC, mac_reg);
band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
/*
MT_RF_MISC (offset: 0x0518)
[2]1'b1: enable external A band PA, 1'b0: disable external A band PA
@@ -514,7 +490,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
}
static void
-mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
+mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
{
int i;
@@ -587,7 +563,7 @@ mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
return ;
}
- mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false);
+ mt76x02_mcu_function_select(dev, BW_SETTING, bw, false);
}
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
@@ -603,8 +579,50 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info[0]);
- mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]);
+ mt76x02_phy_set_txpower(dev, info[0], info[1]);
+}
+
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u32 val, tx_alc, reg_val;
+
+ if (power_on) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
+ false);
+ usleep_range(10, 20);
+ /* XXX: tssi */
+ }
+
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
+
+ if (chan->band == NL80211_BAND_5GHZ) {
+ if (chan->hw_value < 100)
+ val = 0x701;
+ else if (chan->hw_value < 140)
+ val = 0x801;
+ else
+ val = 0x901;
+ } else {
+ val = 0x600;
+ }
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
+ msleep(350);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
+ usleep_range(15000, 20000);
+
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
}
+EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
@@ -665,9 +683,19 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
break;
}
- mt76x0_bbp_set_bw(dev, chandef->width);
- mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
- mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+ if (mt76_is_usb(dev)) {
+ mt76x0_bbp_set_bw(dev, chandef->width);
+ } else {
+ if (chandef->width == NL80211_CHAN_WIDTH_80 ||
+ chandef->width == NL80211_CHAN_WIDTH_40)
+ val = 0x201;
+ else
+ val = 0x601;
+ mt76_wr(dev, MT_TX_SW_CFG0, val);
+ }
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chandef->chan->band,
+ ch_group_index & 1);
mt76x0_ant_select(dev);
mt76_rmw(dev, MT_EXT_CCA_CFG,
@@ -680,7 +708,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
mt76x0_phy_set_band(dev, chandef->chan->band);
mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
- mt76x0_read_rx_gain(dev);
/* set Japan Tx filter at channel 14 */
val = mt76_rr(dev, MT_BBP(CORE, 1));
@@ -690,17 +717,27 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
val &= ~0x20;
mt76_wr(dev, MT_BBP(CORE, 1), val);
- mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
+ mt76x0_read_rx_gain(dev);
+ mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
+ mt76x02_init_agc_gain(dev);
- /* Vendor driver don't do it */
- /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+ if (mt76_is_usb(dev)) {
+ mt76x0_vco_cal(dev, channel);
+ } else {
+ /* enable vco */
+ rf_set(dev, MT_RF(0, 4), BIT(7));
+ }
- mt76x0_vco_cal(dev, channel);
if (scan)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
+ return 0;
+ if (mt76_is_mmio(dev))
+ mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+
return 0;
}
@@ -710,7 +747,7 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
u8 channel = dev->mt76.chandef.chan->hw_value;
int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
mt76x0_vco_cal(dev, channel);
@@ -718,109 +755,113 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
usleep_range(500, 700);
- reg_val = mt76_rr(dev, 0x2124);
- reg_val &= 0xffffff7e;
- mt76_wr(dev, 0x2124, reg_val);
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY,
- is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY,
- is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
- mt76_wr(dev, 0x2124, reg_val);
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
msleep(100);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
-}
-
-void mt76x0_agc_save(struct mt76x02_dev *dev)
-{
- /* Only one RX path */
- dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
-}
-
-void mt76x0_agc_restore(struct mt76x02_dev *dev)
-{
- mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
}
static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
{
u8 rf_b7_73, rf_b0_66, rf_b0_67;
- int cycle, temp;
- u32 val;
- s32 sval;
+ s8 val;
rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
- rf_b0_67 = rf_rr(dev, MT_RF(0, 73));
+ rf_b0_67 = rf_rr(dev, MT_RF(0, 67));
rf_wr(dev, MT_RF(7, 73), 0x02);
rf_wr(dev, MT_RF(0, 66), 0x23);
- rf_wr(dev, MT_RF(0, 73), 0x01);
+ rf_wr(dev, MT_RF(0, 67), 0x01);
mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
- for (cycle = 0; cycle < 2000; cycle++) {
- val = mt76_rr(dev, MT_BBP(CORE, 34));
- if (!(val & 0x10))
- break;
- udelay(3);
- }
-
- if (cycle >= 2000) {
- val &= 0x10;
- mt76_wr(dev, MT_BBP(CORE, 34), val);
+ if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) {
+ mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
goto done;
}
- sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
- if (!(sval & 0x80))
- sval &= 0x7f; /* Positive */
- else
- sval |= 0xffffff00; /* Negative */
+ val = mt76_rr(dev, MT_BBP(CORE, 35));
+ val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25;
- temp = (35 * (sval - dev->cal.rx.temp_offset)) / 10 + 25;
+ if (abs(val - dev->cal.temp_vco) > 20) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
+ dev->mt76.chandef.chan->hw_value,
+ false);
+ dev->cal.temp_vco = val;
+ }
+ if (abs(val - dev->cal.temp) > 30) {
+ mt76x0_phy_calibrate(dev, false);
+ dev->cal.temp = val;
+ }
done:
rf_wr(dev, MT_RF(7, 73), rf_b7_73);
rf_wr(dev, MT_RF(0, 66), rf_b0_66);
- rf_wr(dev, MT_RF(0, 73), rf_b0_67);
+ rf_wr(dev, MT_RF(0, 67), rf_b0_67);
}
-static void mt76x0_dynamic_vga_tuning(struct mt76x02_dev *dev)
+static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
- u32 val, init_vga;
- int avg_rssi;
-
- init_vga = chandef->chan->band == NL80211_BAND_5GHZ ? 0x54 : 0x4E;
- avg_rssi = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
- if (avg_rssi > -60)
- init_vga -= 0x20;
- else if (avg_rssi > -70)
- init_vga -= 0x10;
-
- val = mt76_rr(dev, MT_BBP(AGC, 8));
- val &= 0xFFFF80FF;
- val |= init_vga << 8;
- mt76_wr(dev, MT_BBP(AGC,8), val);
+ u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ u32 val = 0x122c << 16 | 0xf2;
+
+ mt76_wr(dev, MT_BBP(AGC, 8),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
+}
+
+static void
+mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+ bool gain_change;
+ u8 gain_delta;
+ int low_gain;
+
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
+
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
+
+ gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x0_phy_set_gain_val(dev);
+ return;
+ }
+
+ dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10;
+ gain_delta = (low_gain == 2) ? 10 : 0;
+
+ dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta;
+ mt76x0_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
}
-static void mt76x0_phy_calibrate(struct work_struct *work)
+static void mt76x0_phy_calibration_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
cal_work.work);
- mt76x0_dynamic_vga_tuning(dev);
- mt76x0_temp_sensor(dev);
+ mt76x0_phy_update_channel_gain(dev);
+ if (!mt76x0_tssi_enabled(dev))
+ mt76x0_temp_sensor(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
@@ -881,9 +922,9 @@ static void mt76x0_rf_init(struct mt76x02_dev *dev)
void mt76x0_phy_init(struct mt76x02_dev *dev)
{
- INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
mt76x0_rf_init(dev);
- mt76x02_phy_set_rxpath(&dev->mt76);
- mt76x02_phy_set_txdac(&dev->mt76);
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
}
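
The rewritten temperature path polls the sensor with mt76_poll() instead of an open-coded loop, then applies two-level hysteresis: a VCO recalibration once the reading drifts more than 20 degrees from the last VCO reference, a full mt76x0_phy_calibrate() past a 30 degree drift, each updating its own reference so small wobbles do not retrigger. The hysteresis core in isolation (a sketch of the logic above, not driver code):

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Recalibrate only when drift leaves the dead band, then move
     * the reference so the trigger does not refire immediately. */
    static void temp_hysteresis(s8 temp, s8 *ref, int band,
                                void (*recal)(void))
    {
            if (abs(temp - *ref) > band) {
                    recal();
                    *ref = temp;
            }
    }
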
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index fb6fa1fa5548..a9f14d5149d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -40,8 +40,7 @@ mt76x0u_upload_firmware(struct mt76x02_dev *dev,
ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
ilm_len, MT_MCU_IVB_SIZE);
- err = mt76x02u_mcu_fw_send_data(&dev->mt76,
- fw_payload + MT_MCU_IVB_SIZE,
+ err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE,
ilm_len, MCU_FW_URB_MAX_PAYLOAD,
MT_MCU_IVB_SIZE);
if (err)
@@ -49,7 +48,7 @@ mt76x0u_upload_firmware(struct mt76x02_dev *dev,
dlm_len = le32_to_cpu(hdr->dlm_len);
dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
- err = mt76x02u_mcu_fw_send_data(&dev->mt76,
+ err = mt76x02u_mcu_fw_send_data(dev,
fw_payload + le32_to_cpu(hdr->ilm_len),
dlm_len, MCU_FW_URB_MAX_PAYLOAD,
MT_MCU_DLM_OFFSET);
@@ -121,7 +120,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
mt76_set(dev, MT_USB_DMA_CFG,
(MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
- mt76x02u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 6000);
/*
mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 65174817ebc4..47c42c607964 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -55,7 +55,8 @@ struct mt76x02_calibration {
s8 agc_gain_adjust;
s8 low_gain;
- u8 temp;
+ s8 temp_vco;
+ s8 temp;
bool init_cal_done;
bool tssi_cal_done;
@@ -101,8 +102,6 @@ struct mt76x02_dev {
bool no_2ghz;
- u8 agc_save;
-
u8 coverage_class;
u8 slottime;
@@ -119,8 +118,8 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
- unsigned int idx);
+void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx);
int mt76x02_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
@@ -136,14 +135,15 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate);
-s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj);
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
+ s8 max_txpwr_adj);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
-bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update);
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
@@ -156,10 +156,17 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
u32 *tx_info);
extern const u16 mt76x02_beacon_offsets[16];
-void mt76x02_set_beacon_offsets(struct mt76_dev *dev);
+void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev);
void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
void mt76x02_mac_start(struct mt76x02_dev *dev);
+static inline bool is_mt76x2(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7612 ||
+ mt76_chip(&dev->mt76) == 0x7662 ||
+ mt76_chip(&dev->mt76) == 0x7602;
+}
+
static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
{
mt76x02_set_irq_mask(dev, 0, mask);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index d3efeb8a72b7..9390de2a323e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -17,46 +17,43 @@
#include <asm/unaligned.h>
-#include "mt76.h"
#include "mt76x02_eeprom.h"
-#include "mt76x02_regs.h"
static int
-mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data,
+mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
enum mt76x02_eeprom_modes mode)
{
u32 val;
int i;
- val = __mt76_rr(dev, MT_EFUSE_CTRL);
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
val |= MT_EFUSE_CTRL_KICK;
- __mt76_wr(dev, MT_EFUSE_CTRL, val);
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
- if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK,
- 0, 1000))
+ if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
return -ETIMEDOUT;
udelay(2);
- val = __mt76_rr(dev, MT_EFUSE_CTRL);
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
memset(data, 0xff, 16);
return 0;
}
for (i = 0; i < 4; i++) {
- val = __mt76_rr(dev, MT_EFUSE_DATA(i));
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
put_unaligned_le32(val, data + 4 * i);
}
return 0;
}
-int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode)
{
int ret, i;
@@ -71,26 +68,26 @@ int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
}
EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
-void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev)
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
{
u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
case BOARD_TYPE_5GHZ:
- dev->cap.has_5ghz = true;
+ dev->mt76.cap.has_5ghz = true;
break;
case BOARD_TYPE_2GHZ:
- dev->cap.has_2ghz = true;
+ dev->mt76.cap.has_2ghz = true;
break;
default:
- dev->cap.has_2ghz = true;
- dev->cap.has_5ghz = true;
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
break;
}
}
EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
-bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band)
{
u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
@@ -101,7 +98,7 @@ bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
}
EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
-void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
{
u16 val;
@@ -129,7 +126,7 @@ void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
}
EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
-u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
index bcd05f7c5f45..b3ec74835d10 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -18,6 +18,8 @@
#ifndef __MT76x02_EEPROM_H
#define __MT76x02_EEPROM_H
+#include "mt76x02.h"
+
enum mt76x02_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
@@ -168,44 +170,23 @@ static inline s8 mt76x02_rate_power_val(u8 val)
}
static inline int
-mt76x02_eeprom_get(struct mt76_dev *dev,
+mt76x02_eeprom_get(struct mt76x02_dev *dev,
enum mt76x02_eeprom_field field)
{
if ((field & 1) || field >= __MT_EE_MAX)
return -1;
- return get_unaligned_le16(dev->eeprom.data + field);
-}
-
-static inline bool
-mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev)
-{
- u16 val;
-
- val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
- if (!(val & BIT(15)))
- return false;
-
- return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
- MT_EE_NIC_CONF_1_TEMP_TX_ALC;
-}
-
-static inline bool
-mt76x02_tssi_enabled(struct mt76_dev *dev)
-{
- return !mt76x02_temp_tx_alc_enabled(dev) &&
- (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
- MT_EE_NIC_CONF_1_TX_ALC_EN);
+ return get_unaligned_le16(dev->mt76.eeprom.data + field);
}
-bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band);
-int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band);
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode);
-void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
-u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan);
-void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev);
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
#endif /* __MT76x02_EEPROM_H */
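
mt76x02_eeprom_get() now takes the chip-specific device and reads a little-endian 16-bit word from the cached EEPROM image, rejecting odd or out-of-range offsets with -1, which is the driver's convention for an invalid field. A standalone sketch of the same access pattern (EE_MAX and the eeprom buffer are placeholders):

    #include <asm/unaligned.h>

    #define EE_MAX 0x200    /* hypothetical end of the EEPROM map */

    /* Fields are u16-aligned; anything else is a caller bug and
     * returns -1, matching the convention above. */
    static int ee_get(const u8 *eeprom, unsigned int field)
    {
            if ((field & 1) || field >= EE_MAX)
                    return -1;
            return get_unaligned_le16(eeprom + field);
    }
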
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 244245418ebb..10578e4cb269 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -45,8 +45,8 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
}
EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
-int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
@@ -56,20 +56,20 @@ int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
- val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
- __mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
- __mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
- sizeof(key_data));
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
-int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
@@ -79,25 +79,26 @@ int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
- __mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
- __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
memset(iv_data, 0, sizeof(iv_data));
if (key) {
- __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP)
iv_data[3] |= 0x20;
}
- __mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
-void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
+ u8 vif_idx, u8 *mac)
{
struct mt76_wcid_addr addr = {};
u32 attr;
@@ -105,10 +106,10 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
- __mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
- __mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- __mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
if (idx >= 128)
return;
@@ -116,22 +117,22 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
if (mac)
memcpy(addr.macaddr, mac, ETH_ALEN);
- __mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
-void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop)
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
- u32 val = __mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
u32 bit = MT_WCID_DROP_MASK(idx);
/* prevent unnecessary writes */
if ((val & bit) != (bit * drop))
- __mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
-void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
@@ -151,55 +152,13 @@ void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
mtxq->wcid = &mvif->group_wcid;
}
- mt76_txq_init(dev, txq);
+ mt76_txq_init(&dev->mt76, txq);
}
EXPORT_SYMBOL_GPL(mt76x02_txq_init);
-static void
-mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb,
- struct ieee80211_sta *sta, int len, u8 nss)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 txwi_flags = 0;
-
- if (info->flags & IEEE80211_TX_CTL_LDPC)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
- if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- txwi_flags |= MT_TXWI_FLAGS_MMPS;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- txwi->pktid |= MT_TXWI_PKTID_PROBE;
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
- u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
- ba_size <<= sta->ht_cap.ampdu_factor;
- ba_size = min_t(int, 63, ba_size - 1);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- ba_size = 0;
- txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
- txwi_flags |= MT_TXWI_FLAGS_AMPDU |
- FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
- }
-
- if (ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control))
- txwi_flags |= MT_TXWI_FLAGS_TS;
-
- txwi->flags |= cpu_to_le16(txwi_flags);
- txwi->len_ctl = cpu_to_le16(len);
-}
-
static __le16
-mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
+mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
u16 rateval;
u8 phy, rate_idx;
@@ -224,10 +183,10 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->chandef.chan->band;
+ int band = dev->mt76.chandef.chan->band;
u16 val;
- r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
@@ -248,22 +207,22 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
return cpu_to_le16(rateval);
}
-void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
{
- spin_lock_bh(&dev->lock);
+ spin_lock_bh(&dev->mt76.lock);
wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->lock);
+ spin_unlock_bh(&dev->mt76.lock);
}
-bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat)
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat)
{
u32 stat1, stat2;
- stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO);
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
if (!stat->valid)
@@ -339,17 +298,19 @@ mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
return 0;
}
-void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_key_conf *key = info->control.hw_key;
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
+ u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
memset(txwi, 0, sizeof(*txwi));
@@ -374,7 +335,7 @@ void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
txwi->eiv = *((__le32 *)&ccmp_pn[1]);
}
- spin_lock_bh(&dev->lock);
+ spin_lock_bh(&dev->mt76.lock);
if (wcid && (rate->idx < 0 || !rate->count)) {
txwi->rate = wcid->tx_rate;
max_txpwr_adj = wcid->max_txpwr_adj;
@@ -383,26 +344,57 @@ void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
- spin_unlock_bh(&dev->lock);
+ spin_unlock_bh(&dev->mt76.lock);
- txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
max_txpwr_adj);
txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
- if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E4)
+ if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
txwi->txstream = 0x13;
- else if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E3 &&
+ else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
!(txwi->rate & cpu_to_le16(rate_ht_mask)))
txwi->txstream = 0x93;
- mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss);
+ if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
static void
-mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
- struct ieee80211_tx_info *info,
- struct mt76x02_tx_status *st, int n_frames)
+mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x02_tx_status *st, int n_frames)
{
struct ieee80211_tx_rate *rate = info->status.rates;
int cur_idx, last_rate;
@@ -413,7 +405,7 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->chandef.chan->band);
+ dev->mt76.chandef.chan->band);
if (last_rate < IEEE80211_TX_MAX_RATES - 1)
rate[last_rate + 1].idx = -1;
@@ -441,8 +433,8 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
info->flags |= IEEE80211_TX_STAT_ACK;
}
-void mt76x02_send_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat, u8 *update)
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update)
{
struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL;
@@ -450,8 +442,8 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
struct mt76x02_sta *msta = NULL;
rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
+ if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
+ wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
if (wcid) {
void *priv;
@@ -476,7 +468,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
}
mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
+ msta->n_frames);
msta->status = *stat;
msta->n_frames = 1;
@@ -486,7 +478,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
*update = 1;
}
- ieee80211_tx_status_noskb(dev->hw, sta, &info);
+ ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
out:
rcu_read_unlock();
@@ -561,21 +553,21 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
}
EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
-void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr)
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
{
- ether_addr_copy(dev->macaddr, addr);
+ ether_addr_copy(dev->mt76.macaddr, addr);
- if (!is_valid_ether_addr(dev->macaddr)) {
- eth_random_addr(dev->macaddr);
- dev_info(dev->dev,
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
"Invalid MAC address, using random address %pM\n",
- dev->macaddr);
+ dev->mt76.macaddr);
}
- __mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
- __mt76_wr(dev, MT_MAC_ADDR_DW1,
- get_unaligned_le16(dev->macaddr + 4) |
- FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
@@ -697,7 +689,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
- ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat);
+ ret = mt76x02_mac_load_tx_status(dev, &stat);
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
if (!ret)
@@ -706,7 +698,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
trace_mac_txstat_fetch(dev, &stat);
if (!irq) {
- mt76x02_send_tx_status(&dev->mt76, &stat, &update);
+ mt76x02_send_tx_status(dev, &stat, &update);
continue;
}
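
The conversions above all follow one pattern: struct mt76x02_dev embeds the generic struct mt76_dev, so helpers that now take the wrapper reach shared state through dev->mt76.* and the register accessors drop their __mt76_ prefixes. A minimal sketch of that embedding, with the fields reduced to the ones these hunks actually touch (illustrative only, not part of the patch):

    #include <linux/kernel.h>     /* container_of() */
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <net/mac80211.h>

    struct mt76_dev_sketch {
            spinlock_t lock;            /* was dev->lock, now dev->mt76.lock */
            struct ieee80211_hw *hw;    /* was dev->hw,   now dev->mt76.hw  */
            u16 chainmask;              /* was dev->chainmask */
    };

    struct mt76x02_dev_sketch {
            struct mt76_dev_sketch mt76;    /* embedded generic device */
            /* chip-specific calibration state, tx-status fifo, ... */
    };

    /* Generic callbacks still receive struct mt76_dev *; the wrapper is
     * recovered the same way the patch does it in mt76x02_mcu_msg_send():
     */
    static struct mt76x02_dev_sketch *to_mt76x02(struct mt76_dev_sketch *mdev)
    {
            return container_of(mdev, struct mt76x02_dev_sketch, mt76);
    }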
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 4f7ee4620ab5..d99c18743969 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -198,28 +198,29 @@ mt76x02_skb_tx_info(struct sk_buff *skb)
return (void *)info->status.status_driver_data;
}
-void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq);
enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
-int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key);
-int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
- struct ieee80211_key_conf *key);
-void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
-void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop);
-void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate);
-bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat);
-void mt76x02_send_tx_status(struct mt76_dev *dev,
- struct mt76x02_tx_status *stat, u8 *update);
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key);
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
+ u8 *mac);
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat);
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update);
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void *rxi);
int
mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
-void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr);
-void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 6d565133b7af..1b853bb723fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -19,9 +19,7 @@
#include <linux/firmware.h>
#include <linux/delay.h>
-#include "mt76.h"
#include "mt76x02_mcu.h"
-#include "mt76x02_dma.h"
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
@@ -37,7 +35,7 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
static struct sk_buff *
-mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
+mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
{
unsigned long timeout;
@@ -45,17 +43,17 @@ mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
return NULL;
timeout = expires - jiffies;
- wait_event_timeout(dev->mmio.mcu.wait,
- !skb_queue_empty(&dev->mmio.mcu.res_q),
+ wait_event_timeout(dev->mt76.mmio.mcu.wait,
+ !skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
timeout);
- return skb_dequeue(&dev->mmio.mcu.res_q);
+ return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
}
static int
-mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
+mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = &dev->mt76.q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
u32 tx_info;
@@ -66,24 +64,26 @@ mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
- addr = dma_map_single(dev->dev, skb->data, skb->len,
+ addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (dma_mapping_error(dev->mt76.dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
+
spin_lock_bh(&q->lock);
- dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
- dev->queue_ops->kick(dev, q);
+ mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
-int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
unsigned long expires = jiffies + HZ;
int ret;
u8 seq;
@@ -91,11 +91,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
if (!skb)
return -EINVAL;
- mutex_lock(&dev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mmio.mcu.mutex);
- seq = ++dev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
if (!seq)
- seq = ++dev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
if (ret)
@@ -107,7 +107,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
skb = mt76x02_mcu_get_response(dev, expires);
if (!skb) {
- dev_err(dev->dev,
+ dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
@@ -125,13 +125,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
}
out:
- mutex_unlock(&dev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mmio.mcu.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
-int mt76x02_mcu_function_select(struct mt76_dev *dev,
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp)
{
@@ -144,13 +144,12 @@ int mt76x02_mcu_function_select(struct mt76_dev *dev,
.value = cpu_to_le32(val),
};
- skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
- return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
- wait_resp);
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
-int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
bool wait_resp)
{
struct sk_buff *skb;
@@ -162,13 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
.level = cpu_to_le32(0),
};
- skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
- return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
- wait_resp);
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
-int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
u32 param, bool wait)
{
struct sk_buff *skb;
@@ -182,44 +180,44 @@ int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
int ret;
if (wait)
- dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
+ mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
- skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
- ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
if (ret)
return ret;
if (wait &&
- WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0,
- BIT(31), BIT(31), 100)))
+ WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+ BIT(31), BIT(31), 100)))
return -ETIMEDOUT;
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
-int mt76x02_mcu_cleanup(struct mt76_dev *dev)
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
{
struct sk_buff *skb;
- dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1);
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
- while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL)
+ while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
-void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h)
{
u16 bld = le16_to_cpu(h->build_ver);
u16 ver = le16_to_cpu(h->fw_ver);
- snprintf(dev->hw->wiphy->fw_version,
- sizeof(dev->hw->wiphy->fw_version),
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
"%d.%d.%02d-b%x",
(ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
}
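
One detail worth noting in mt76x02_mcu_msg_send() above: the MCU sequence number is 4 bits wide and the value 0 is skipped, presumably so a zeroed response field can never match a pending command (the rationale is inferred, not stated in the patch). A standalone sketch of that wrap-around:

    static u8 next_mcu_seq(u8 *msg_seq)
    {
            u8 seq = ++(*msg_seq) & 0xf;

            if (!seq)       /* skip 0 after the 4-bit counter wraps */
                    seq = ++(*msg_seq) & 0xf;
            return seq;
    }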
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
index ce664f8b1c94..2d8fd2514570 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -17,6 +17,8 @@
#ifndef __MT76x02_MCU_H
#define __MT76x02_MCU_H
+#include "mt76x02.h"
+
#define MT_MCU_RESET_CTL 0x070C
#define MT_MCU_INT_LEVEL 0x0718
#define MT_MCU_COM_REG0 0x0730
@@ -94,18 +96,18 @@ struct mt76x02_patch_header {
u8 pad[2];
};
-int mt76x02_mcu_cleanup(struct mt76_dev *dev);
-int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
u32 param, bool wait);
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
-int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp);
-int mt76x02_mcu_function_select(struct mt76_dev *dev,
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp);
-int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
bool wait_resp);
-void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h);
#endif /* __MT76x02_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 1b945079c802..39f092034240 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -65,7 +65,7 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
u8 update = 1;
while (kfifo_get(&dev->txstatus_fifo, &stat))
- mt76x02_send_tx_status(&dev->mt76, &stat, &update);
+ mt76x02_send_tx_status(dev, &stat, &update);
}
static void mt76x02_tx_tasklet(unsigned long data)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index d31ce1d7b689..0f1d7b5c9f68 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -17,18 +17,17 @@
#include <linux/kernel.h>
-#include "mt76.h"
+#include "mt76x02.h"
#include "mt76x02_phy.h"
-#include "mt76x02_mac.h"
-void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
{
u32 val;
- val = __mt76_rr(dev, MT_BBP(AGC, 0));
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~BIT(4);
- switch (dev->chainmask & 0xf) {
+ switch (dev->mt76.chainmask & 0xf) {
case 2:
val |= BIT(3);
break;
@@ -37,23 +36,23 @@ void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
break;
}
- __mt76_wr(dev, MT_BBP(AGC, 0), val);
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
mb();
- val = __mt76_rr(dev, MT_BBP(AGC, 0));
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
-void mt76x02_phy_set_txdac(struct mt76_dev *dev)
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
int txpath;
- txpath = (dev->chainmask >> 8) & 0xf;
+ txpath = (dev->mt76.chainmask >> 8) & 0xf;
switch (txpath) {
case 2:
- __mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
break;
default:
- __mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
break;
}
}
@@ -102,40 +101,38 @@ void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
}
EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
-void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1)
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
{
- struct mt76_rate_power *t = &dev->rate_power;
-
- __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0,
- txp_0);
- __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1,
- txp_1);
-
- __mt76_wr(dev, MT_TX_PWR_CFG_0,
- mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
- t->ofdm[2]));
- __mt76_wr(dev, MT_TX_PWR_CFG_1,
- mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
- t->ht[2]));
- __mt76_wr(dev, MT_TX_PWR_CFG_2,
- mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
- t->ht[10]));
- __mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
- t->stbc[2]));
- __mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
- __mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
- t->vht[9]));
- __mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
- __mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
+ struct mt76_rate_power *t = &dev->mt76.rate_power;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
+ t->ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
+ t->ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
+ t->ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
+ t->stbc[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
+ t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
-int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
+int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
{
struct mt76x02_sta *sta;
struct mt76_wcid *wcid;
@@ -145,8 +142,8 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
local_bh_disable();
rcu_read_lock();
- for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
- unsigned long mask = dev->wcid_mask[i];
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
+ unsigned long mask = dev->mt76.wcid_mask[i];
if (!mask)
continue;
@@ -155,17 +152,17 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
if (!(mask & 1))
continue;
- wcid = rcu_dereference(dev->wcid[j]);
+ wcid = rcu_dereference(dev->mt76.wcid[j]);
if (!wcid)
continue;
sta = container_of(wcid, struct mt76x02_sta, wcid);
- spin_lock(&dev->rx_lock);
+ spin_lock(&dev->mt76.rx_lock);
if (sta->inactive_count++ < 5)
cur_rssi = ewma_signal_read(&sta->rssi);
else
cur_rssi = 0;
- spin_unlock(&dev->rx_lock);
+ spin_unlock(&dev->mt76.rx_lock);
if (cur_rssi < min_rssi)
min_rssi = cur_rssi;
@@ -181,3 +178,81 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
return min_rssi;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
+
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
+
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
+
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
+{
+ u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
+ bool ret = false;
+ u32 false_cca;
+
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+ mt76_rr(dev, MT_RX_STAT_1));
+ dev->cal.false_cca = false_cca;
+ if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
+ dev->cal.agc_gain_adjust += 2;
+ ret = true;
+ } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+ (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
+ dev->cal.agc_gain_adjust -= 2;
+ ret = true;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
+
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
+{
+ dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
+ MT_BBP_AGC_GAIN);
+ dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
+ MT_BBP_AGC_GAIN);
+ memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+ sizeof(dev->cal.agc_gain_cur));
+ dev->cal.low_gain = -1;
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index e70ea6eeb077..2b316cf7c70c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -19,12 +19,43 @@
#include "mt76x02_regs.h"
+static inline int
+mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -62;
+ case NL80211_CHAN_WIDTH_40:
+ return -65;
+ default:
+ return -68;
+ }
+}
+
+static inline int
+mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -76;
+ case NL80211_CHAN_WIDTH_40:
+ return -79;
+ default:
+ return -82;
+ }
+}
+
void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
-void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_2);
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
-void mt76x02_phy_set_rxpath(struct mt76_dev *dev);
-void mt76x02_phy_set_txdac(struct mt76_dev *dev);
-int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev);
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
+int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev);
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper);
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev);
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev);
#endif /* __MT76x02_PHY_H */
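
The two inline helpers added above give bandwidth-dependent RSSI cut-offs (-62/-65/-68 dBm and -76/-79/-82 dBm). A hypothetical caller combining them into a 0/1/2 low-gain level, in the style of the per-chip gain-control code (the name and caller are illustrative; the real users are outside this patch):

    static int example_low_gain_level(struct mt76x02_dev *dev, int avg_rssi)
    {
            /* 2 = strong signal (back the front-end gain off), 0 = weak */
            return (avg_rssi > mt76x02_get_low_rssi_gain_thresh(dev)) +
                   (avg_rssi > mt76x02_get_rssi_gain_thresh(dev));
    }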
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 24d1e6d747dd..f7de77d09d28 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -205,8 +205,8 @@
#define MT_TXQ_STA 0x0434
#define MT_RF_CSR_CFG 0x0500
#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
-#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
-#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15)
#define MT_RF_CSR_CFG_WR BIT(30)
#define MT_RF_CSR_CFG_KICK BIT(31)
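
The hunk above widens the RF register-ID field from bits 13:8 to 14:8 and narrows the bank field from 17:14 to 17:15. A hypothetical helper showing how a register word is composed from these masks (FIELD_PREP and the MT_RF_CSR_CFG_* definitions are real; the function itself is only an illustration):

    #include <linux/bitfield.h>

    static u32 rf_csr_cfg_val(u8 bank, u8 reg, u8 data)
    {
            return FIELD_PREP(MT_RF_CSR_CFG_DATA, data) |
                   FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |    /* now 7 bits */
                   FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | /* now 3 bits */
                   MT_RF_CSR_CFG_WR | MT_RF_CSR_CFG_KICK;
    }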
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 830377221739..d3de08872d6e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -71,7 +71,7 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
-s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate)
{
s8 max_txpwr;
@@ -80,23 +80,23 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
if (mcs == 8 || mcs == 9) {
- max_txpwr = dev->rate_power.vht[8];
+ max_txpwr = dev->mt76.rate_power.vht[8];
} else {
u8 nss, idx;
nss = ieee80211_rate_get_vht_nss(rate);
idx = ((nss - 1) << 3) + mcs;
- max_txpwr = dev->rate_power.ht[idx & 0xf];
+ max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
}
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+ max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
} else {
- enum nl80211_band band = dev->chandef.chan->band;
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
if (band == NL80211_BAND_2GHZ) {
const struct ieee80211_rate *r;
- struct wiphy *wiphy = dev->hw->wiphy;
- struct mt76_rate_power *rp = &dev->rate_power;
+ struct wiphy *wiphy = dev->mt76.hw->wiphy;
+ struct mt76_rate_power *rp = &dev->mt76.rate_power;
r = &wiphy->bands[band]->bitrates[rate->idx];
if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
@@ -104,7 +104,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
else
max_txpwr = rp->ofdm[r->hw_value & 0x7];
} else {
- max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+ max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
}
}
@@ -112,10 +112,8 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
-s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj)
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
- struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
-
txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
txpwr -= (dev->target_power + dev->target_power_delta[0]);
txpwr = min_t(s8, txpwr, max_txpwr_adj);
@@ -133,7 +131,7 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
{
s8 txpwr_adj;
- txpwr_adj = mt76x02_tx_get_txpwr_adj(&dev->mt76, txpwr,
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
dev->mt76.rate_power.ofdm[4]);
mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
@@ -157,8 +155,9 @@ void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
-bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update)
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_tx_status stat;
if (!mt76x02_mac_load_tx_status(dev, &stat))
@@ -181,9 +180,9 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
int ret;
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
- mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false);
+ mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
- mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, skb->len);
+ mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
ret = mt76x02_insert_hdr_pad(skb);
if (ret < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index 6b2138328eb2..0126e51d77ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -17,15 +17,15 @@
#ifndef __MT76x02_USB_H
#define __MT76x02_USB_H
-#include "mt76.h"
+#include "mt76x02.h"
void mt76x02u_init_mcu(struct mt76_dev *dev);
-void mt76x02u_mcu_fw_reset(struct mt76_dev *dev);
-int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
-int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data,
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 7c6c973af386..dc2226c722dd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -34,17 +34,6 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
-static int mt76x02u_check_skb_rooms(struct sk_buff *skb)
-{
- int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- u32 need_head;
-
- need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN;
- if (hdr_len % 4)
- need_head += 2;
- return skb_cow(skb, need_head);
-}
-
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
struct sk_buff *iter, *last = skb;
@@ -99,17 +88,14 @@ mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
}
-int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data,
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info)
{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
- int err, len = skb->len;
-
- err = mt76x02u_check_skb_rooms(skb);
- if (err < 0)
- return -ENOMEM;
+ int len = skb->len;
mt76x02_insert_hdr_pad(skb);
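
The per-frame skb_cow() check deleted above is replaced by reserving worst-case headroom once at init time; the corresponding init.c hunk appears later in this patch:

    hw->extra_tx_headroom = 2;
    if (mt76_is_usb(dev))
            hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
                                     MT_DMA_HDR_LEN;

With mac80211 guaranteeing that much headroom on every transmitted frame, mt76x02u_tx_prepare_skb() no longer needs a fallible reallocation path, which is why its error return disappears as well.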
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index cb5f073f08af..da299b8a1334 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -17,8 +17,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
-#include "mt76.h"
-#include "mt76x02_dma.h"
+#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"
@@ -255,16 +254,16 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
return ret;
}
-void mt76x02u_mcu_fw_reset(struct mt76_dev *dev)
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
- mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
static int
-__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
const void *fw_data, int len, u32 dst_addr)
{
u8 *data = sg_virt(&buf->urb->sg[0]);
@@ -281,14 +280,14 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
memcpy(data + sizeof(info), fw_data, len);
memset(data + sizeof(info) + len, 0, 4);
- mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_ADDR, dst_addr);
len = roundup(len, 4);
- mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_LEN, len << 16);
buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
- err = mt76u_submit_buf(dev, USB_DIR_OUT,
+ err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
MT_EP_OUT_INBAND_CMD,
buf, GFP_KERNEL,
mt76u_mcu_complete_urb, &cmpl);
@@ -297,31 +296,31 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!wait_for_completion_timeout(&cmpl,
msecs_to_jiffies(1000))) {
- dev_err(dev->dev, "firmware upload timed out\n");
+ dev_err(dev->mt76.dev, "firmware upload timed out\n");
usb_kill_urb(buf->urb);
return -ETIMEDOUT;
}
if (mt76u_urb_error(buf->urb)) {
- dev_err(dev->dev, "firmware upload failed: %d\n",
+ dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
buf->urb->status);
return buf->urb->status;
}
- val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
val++;
- mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
return 0;
}
-int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset)
{
int err, len, pos = 0, max_len = max_payload - 8;
struct mt76u_buf buf;
- err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+ err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
GFP_KERNEL);
if (err < 0)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 5851ab6b7e26..ca05332f81fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -48,21 +48,21 @@ struct ieee80211_rate mt76x02_rates[] = {
EXPORT_SYMBOL_GPL(mt76x02_rates);
void mt76x02_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
@@ -75,25 +75,25 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
*total_flags = flags;
- dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta)
{
- struct mt76_dev *dev = hw->priv;
- struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
int ret = 0;
int idx = 0;
int i;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
if (idx < 0) {
ret = -ENOSPC;
goto out;
@@ -113,40 +113,40 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ewma_signal_init(&msta->rssi);
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid);
out:
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_add);
int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta)
{
- struct mt76_dev *dev = hw->priv;
- struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
int i;
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
+ mutex_lock(&dev->mt76.mutex);
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(dev, sta->txq[i]);
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
mt76x02_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->wcid_mask, idx);
+ mt76_wcid_free(dev->mt76.wcid_mask, idx);
mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
-void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
- unsigned int idx)
+void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx)
{
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
@@ -158,11 +158,11 @@ EXPORT_SYMBOL_GPL(mt76x02_vif_init);
int
mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 0;
if (vif->addr[0] & BIT(1))
- idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+ idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
/*
* Client mode typically only has one configurable BSSID register,
@@ -186,20 +186,20 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
EXPORT_SYMBOL_GPL(mt76x02_add_interface);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mt76_txq_remove(dev, vif->txq);
+ mt76_txq_remove(&dev->mt76, vif->txq);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
struct ieee80211_sta *sta = params->sta;
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct ieee80211_txq *txq = sta->txq[params->tid];
u16 tid = params->tid;
@@ -213,12 +213,14 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size);
- __mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
+ *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
break;
case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(dev, &msta->wcid, tid);
- __mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
@@ -245,11 +247,11 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
- struct mt76_dev *dev = hw->priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76x02_sta *msta;
struct mt76_wcid *wcid;
int idx = key->keyidx;
@@ -295,7 +297,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key = NULL;
}
- mt76_wcid_key_setup(dev, wcid, key);
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
if (!msta) {
if (key || wcid->hw_key_idx == idx) {
@@ -312,13 +314,13 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(mt76x02_set_key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ u16 queue, const struct ieee80211_tx_queue_params *params)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->q_tx[queue].hw_idx;
+ qid = dev->mt76.q_tx[queue].hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
@@ -329,27 +331,27 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- __mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
- val = __mt76_rr(dev, MT_WMM_TXOP(qid));
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_TXOP(qid), val);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
- val = __mt76_rr(dev, MT_WMM_AIFSN);
+ val = mt76_rr(dev, MT_WMM_AIFSN);
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_AIFSN, val);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
- val = __mt76_rr(dev, MT_WMM_CWMIN);
+ val = mt76_rr(dev, MT_WMM_CWMIN);
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_CWMIN, val);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
- val = __mt76_rr(dev, MT_WMM_CWMAX);
+ val = mt76_rr(dev, MT_WMM_CWMAX);
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
- __mt76_wr(dev, MT_WMM_CWMAX, val);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
return 0;
}
@@ -359,7 +361,7 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
struct ieee80211_tx_rate rate = {};
@@ -425,7 +427,7 @@ const u16 mt76x02_beacon_offsets[16] = {
};
EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
-void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
+void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
{
u16 val, base = MT_BEACON_BASE;
u32 regs[4] = {};
@@ -437,7 +439,7 @@ void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
}
for (i = 0; i < 4; i++)
- __mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}
EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);
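
For reference, the MT76_FILTER() macro used in mt76x02_configure_filter() expands like this for a single flag; the multiply-by-boolean idiom sets the hardware drop bit only when the corresponding mac80211 filter flag is absent (a sketch of one expansion, not new code in the patch):

    flags |= *total_flags & FIF_FCSFAIL;
    dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_CRC_ERR;
    dev->mt76.rxfilter |= !(flags & FIF_FCSFAIL) * MT_RX_FILTR_CFG_CRC_ERR;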
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index bbab021b5f1a..f39b622d03f4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -177,8 +177,8 @@ mt76x2_eeprom_load(struct mt76x02_dev *dev)
efuse = dev->mt76.otp.data;
- if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse,
- MT7662_EEPROM_SIZE, MT_EE_READ))
+ if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE,
+ MT_EE_READ))
goto out;
if (found) {
@@ -248,22 +248,22 @@ mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
group = mt76x2_get_cal_channel_group(channel);
switch (group) {
case MT_CH_5G_JAPAN:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
case MT_CH_5G_UNII_1:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
case MT_CH_5G_UNII_2E_1:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2E_2:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
default:
- return mt76x02_eeprom_get(&dev->mt76,
+ return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
}
}
@@ -277,14 +277,13 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
else
val = mt76x2_get_5g_rx_gain(dev, channel);
mt76x2_set_rx_gain_group(dev, val);
- mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g);
+ mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g);
mt76x2_set_rssi_offset(dev, 0, val);
mt76x2_set_rssi_offset(dev, 1, val >> 8);
@@ -293,7 +292,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan);
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
@@ -308,53 +307,49 @@ void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
memset(t, 0, sizeof(*t));
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK);
t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_5G_6M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
else
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_2G_6M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_5G_24M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
else
- val = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_OFDM_2G_24M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
if (!is_5ghz)
val >>= 8;
t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
@@ -390,7 +385,7 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
t->target_power = val >> 8;
}
@@ -441,7 +436,7 @@ mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN);
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
t->target_power = val & 0xff;
}
@@ -453,8 +448,8 @@ void mt76x2_get_power_info(struct mt76x02_dev *dev,
memset(t, 0, sizeof(*t));
- bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40);
- bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80);
+ bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
if (chan->band == NL80211_BAND_5GHZ) {
bw40 >>= 8;
@@ -469,7 +464,7 @@ void mt76x2_get_power_info(struct mt76x02_dev *dev,
MT_EE_TX_POWER_1_START_2G);
}
- if (mt76x02_tssi_enabled(&dev->mt76) ||
+ if (mt76x2_tssi_enabled(dev) ||
!mt76x02_field_valid(t->target_power))
t->target_power = t->chain[0].target_power;
@@ -486,23 +481,20 @@ int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
memset(t, 0, sizeof(*t));
- if (!mt76x02_temp_tx_alc_enabled(&dev->mt76))
+ if (!mt76x2_temp_tx_alc_enabled(dev))
return -EINVAL;
- if (!mt76x02_ext_pa_enabled(&dev->mt76, band))
+ if (!mt76x02_ext_pa_enabled(dev, band))
return -EINVAL;
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
t->temp_25_ref = val & 0x7f;
if (band == NL80211_BAND_5GHZ) {
- slope = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_RF_TEMP_COMP_SLOPE_5G);
- bounds = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_TX_POWER_EXT_PA_5G);
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+ bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
} else {
- slope = mt76x02_eeprom_get(&dev->mt76,
- MT_EE_RF_TEMP_COMP_SLOPE_2G);
- bounds = mt76x02_eeprom_get(&dev->mt76,
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+ bounds = mt76x02_eeprom_get(dev,
MT_EE_TX_POWER_DELTA_BW80) >> 8;
}
@@ -523,7 +515,7 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+ mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
mt76_eeprom_override(&dev->mt76);
dev->mt76.macaddr[0] &= ~BIT(1);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
index c97b31c77d83..9e735524d367 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -62,7 +62,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
static inline bool
mt76x2_has_ext_lna(struct mt76x02_dev *dev)
{
- u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
+ u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
@@ -70,4 +70,25 @@ mt76x2_has_ext_lna(struct mt76x02_dev *dev)
return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
}
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev)
+{
+ u16 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ if (!(val & BIT(15)))
+ return false;
+
+ return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x02_dev *dev)
+{
+ return !mt76x2_temp_tx_alc_enabled(dev) &&
+ (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
#endif
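
The two helpers moved into this header encode the calibration policy: temperature-based TX ALC takes precedence, and TSSI counts as enabled only when temp-ALC is off and the TX_ALC_EN EEPROM bit is set. A usage sketch (hypothetical caller; the real ones live in the mt76x2 phy code):

    if (mt76x2_tssi_enabled(dev)) {
            /* closed-loop power control driven by TSSI readback */
    } else if (mt76x2_temp_tx_alc_enabled(dev)) {
            /* open-loop compensation from the temperature sensor */
    }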
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index ccd9bc9d3e1e..3c73fdeaf30f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -167,6 +167,9 @@ void mt76x2_init_device(struct mt76x02_dev *dev)
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
+ if (mt76_is_usb(dev))
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index 134037a227d7..88bd62cfbdf9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -59,7 +59,6 @@ EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
- struct mt76_dev *mdev = &dev->mt76;
struct sk_buff *skb;
struct {
u8 cr_mode;
@@ -76,8 +75,8 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
u32 val;
val = BIT(31);
- val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
- val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
msg.cfg = cpu_to_le32(val);
/* first set the channel without the extension channel info */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index cbec8c6f1b2d..ab93125f46de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -100,8 +100,6 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
enum nl80211_band band);
void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
enum nl80211_band band, u8 bw);
-void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
-void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper);
void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index f229c6eb65dc..3824290b219d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -43,7 +43,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
u16 eep_val;
s8 offset = 0;
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
offset = eep_val & 0x7f;
if ((eep_val & 0xff) == 0xff)
@@ -53,7 +53,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
eep_val >>= 8;
if (eep_val == 0x00 || eep_val == 0xff) {
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
eep_val &= 0xff;
if (eep_val == 0x00 || eep_val == 0xff)
@@ -64,7 +64,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
case 0:
mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
@@ -143,14 +143,14 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
for (i = 0; i < 256; i++)
- mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL);
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
for (i = 0; i < MT_MAX_VIFS; i++)
- mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL);
+ mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
for (i = 0; i < 16; i++)
for (k = 0; k < 4; k++)
- mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL);
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
for (i = 0; i < 8; i++) {
mt76x2_mac_set_bssid(dev, i, null_addr);
@@ -168,7 +168,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
MT_CH_TIME_CFG_EIFS_AS_BUSY |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
- mt76x02_set_beacon_offsets(&dev->mt76);
+ mt76x02_set_beacon_offsets(dev);
mt76x2_set_tx_ackto(dev);
@@ -337,7 +337,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
- mt76x02_mcu_set_radio_state(&dev->mt76, false, true);
+ mt76x02_mcu_set_radio_state(dev, false, true);
mt76x2_mac_stop(dev, false);
}
@@ -347,7 +347,7 @@ void mt76x2_cleanup(struct mt76x02_dev *dev)
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
mt76x02_dma_cleanup(dev);
- mt76x02_mcu_cleanup(&dev->mt76);
+ mt76x02_mcu_cleanup(dev);
}
struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
index 08366c5988ea..4b331ed14bb2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
@@ -36,7 +36,7 @@ mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
- mt76x02_mac_write_txwi(&dev->mt76, &txwi, skb, NULL, NULL, skb->len);
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 65fef082e7cc..034a06295668 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -172,7 +172,7 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
- mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps);
+ mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index 898aa229671c..d8fa9ba56437 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -140,7 +140,7 @@ mt76pci_load_firmware(struct mt76x02_dev *dev)
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
- val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
@@ -152,8 +152,8 @@ mt76pci_load_firmware(struct mt76x02_dev *dev)
return -ETIMEDOUT;
}
+ mt76x02_set_ethtool_fwver(dev, hdr);
dev_info(dev->mt76.dev, "Firmware running!\n");
- mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
release_firmware(fw);
@@ -183,6 +183,6 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true);
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 40ea5f7480fb..5bda44540225 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -26,7 +26,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u32 flag = 0;
- if (!mt76x02_tssi_enabled(&dev->mt76))
+ if (!mt76x2_tssi_enabled(dev))
return false;
if (mt76x2_channel_silent(dev))
@@ -35,10 +35,10 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
- if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true);
dev->cal.tssi_cal_done = true;
return true;
}
@@ -62,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x2_mac_stop(dev, false);
if (is_5ghz)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true);
if (!mac_stopped)
mt76x2_mac_resume(dev);
@@ -125,39 +125,6 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
}
static void
-mt76x2_get_agc_gain(struct mt76x02_dev *dev, u8 *dest)
-{
- dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
- dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
-}
-
-static int
-mt76x2_get_rssi_gain_thresh(struct mt76x02_dev *dev)
-{
- switch (dev->mt76.chandef.width) {
- case NL80211_CHAN_WIDTH_80:
- return -62;
- case NL80211_CHAN_WIDTH_40:
- return -65;
- default:
- return -68;
- }
-}
-
-static int
-mt76x2_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
-{
- switch (dev->mt76.chandef.width) {
- case NL80211_CHAN_WIDTH_80:
- return -76;
- case NL80211_CHAN_WIDTH_40:
- return -79;
- default:
- return -82;
- }
-}
-
-static void
mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
{
u32 val;
@@ -183,25 +150,6 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
}
static void
-mt76x2_phy_adjust_vga_gain(struct mt76x02_dev *dev)
-{
- u32 false_cca;
- u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
-
- false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
- dev->cal.false_cca = false_cca;
- if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
- dev->cal.agc_gain_adjust += 2;
- else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
- (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
- dev->cal.agc_gain_adjust -= 2;
- else
- return;
-
- mt76x2_phy_set_gain_val(dev);
-}
-
-static void
mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
@@ -210,16 +158,17 @@ mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
- low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
- (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
dev->cal.low_gain = low_gain;
if (!gain_change) {
- mt76x2_phy_adjust_vga_gain(dev);
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x2_phy_set_gain_val(dev);
return;
}
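
The VGA gain hysteresis deleted above moves into a shared mt76x02 helper that only reports whether the adjustment changed; the chip-specific caller then applies the value. A sketch of that loop, with the thresholds taken from the removed code:

#include <stdbool.h>
#include <stdio.h>

static bool adjust_vga_gain(int low_gain, unsigned int false_cca, int *adj)
{
	int limit = low_gain > 0 ? 16 : 4;

	if (false_cca > 800 && *adj < limit)
		*adj += 2;		/* too many false CCAs: back off */
	else if ((false_cca < 10 && *adj > 0) ||
		 (*adj >= limit && false_cca < 500))
		*adj -= 2;		/* quiet again: restore sensitivity */
	else
		return false;		/* inside the hysteresis band */
	return true;
}

int main(void)
{
	int adj = 0;

	if (adjust_vga_gain(1, 900, &adj))	/* caller applies the value */
		printf("apply gain adjust %d\n", adj);	/* prints 2 */
	return 0;
}
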
@@ -337,8 +286,8 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
mt76x2_configure_tx_delay(dev, band, bw);
mt76x2_phy_set_txpower(dev);
- mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
- mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -361,17 +310,17 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
if (!dev->cal.init_cal_done) {
- u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
}
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true);
dev->cal.init_cal_done = true;
@@ -384,14 +333,11 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
if (scan)
return 0;
- dev->cal.low_gain = -1;
mt76x2_phy_channel_calibrate(dev, true);
- mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
- memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
- sizeof(dev->cal.agc_gain_cur));
+ mt76x02_init_agc_gain(dev);
/* init default values for temp compensation */
- if (mt76x02_tssi_enabled(&dev->mt76)) {
+ if (mt76x2_tssi_enabled(dev)) {
mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
0x38);
mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
@@ -449,7 +395,7 @@ int mt76x2_phy_start(struct mt76x02_dev *dev)
{
int ret;
- ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true);
+ ret = mt76x02_mcu_set_radio_state(dev, true, true);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index f00aed915ee8..e9fff5b7f125 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -65,7 +65,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
} else {
@@ -76,7 +76,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
pa_mode[0] = 0x0000ffff;
pa_mode[1] = 0x00ff00ff;
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
} else {
@@ -84,7 +84,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
}
- if (mt76x02_ext_pa_enabled(&dev->mt76, band))
+ if (mt76x02_ext_pa_enabled(dev, band))
pa_mode_adj = 0x04000000;
else
pa_mode_adj = 0;
@@ -98,7 +98,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
u32 val;
if (band == NL80211_BAND_2GHZ)
@@ -187,7 +187,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
dev->mt76.rate_power = t;
- mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1);
+ mt76x02_phy_set_txpower(dev, txp_0, txp_1);
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
@@ -196,7 +196,7 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
{
u32 cfg0, cfg1;
- if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
cfg0 = bw ? 0x000b0c01 : 0x00101101;
cfg1 = 0x00011414;
} else {
@@ -210,50 +210,6 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
-void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
-
-void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper)
-{
- switch (band) {
- case NL80211_BAND_2GHZ:
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- case NL80211_BAND_5GHZ:
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- }
-
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
-
void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
@@ -275,7 +231,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
dev->cal.tssi_comp_pending = false;
mt76x2_get_power_info(dev, &txp, chan);
- if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
t.pa_mode = 1;
t.cal_mode = BIT(1);
@@ -289,8 +245,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
return;
usleep_range(10000, 20000);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD,
- chan->hw_value, wait);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait);
dev->cal.dpd_cal_done = true;
}
}
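
The bandwidth and band programming removed here is consolidated into the shared mt76x02_phy_set_bw()/mt76x02_phy_set_band() helpers that the earlier hunks call. The width-to-register-field mapping, reproduced from the removed code in a standalone sketch:

#include <stdio.h>

enum width { W20, W40, W80 };	/* stand-ins for NL80211_CHAN_WIDTH_* */

static void bw_to_fields(enum width w, int *core_val, int *agc_val)
{
	switch (w) {
	case W80: *core_val = 3; *agc_val = 7; break;
	case W40: *core_val = 2; *agc_val = 3; break;
	default:  *core_val = 0; *agc_val = 1; break;
	}
}

int main(void)
{
	int core, agc;

	bw_to_fields(W80, &core, &agc);
	printf("core=%d agc=%d\n", core, agc);	/* core=3 agc=7 */
	return 0;
}
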
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index c82f16efa327..13cce2937573 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -130,7 +130,7 @@ static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
put_unaligned_le32(val, dev->mt76.eeprom.data + i);
}
- mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+ mt76x02_eeprom_parse_hw_cap(dev);
return 0;
}
@@ -204,8 +204,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
if (err < 0)
return err;
- mt76x02_mac_setaddr(&dev->mt76,
- dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
mt76x2u_init_beacon_offsets(dev);
@@ -237,8 +236,8 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
if (err < 0)
return err;
- mt76x02_phy_set_rxpath(&dev->mt76);
- mt76x02_phy_set_txdac(&dev->mt76);
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
return mt76x2u_mac_stop(dev);
}
@@ -303,7 +302,7 @@ void mt76x2u_stop_hw(struct mt76x02_dev *dev)
void mt76x2u_cleanup(struct mt76x02_dev *dev)
{
- mt76x02_mcu_set_radio_state(&dev->mt76, false, false);
+ mt76x02_mcu_set_radio_state(dev, false, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index dbd635aa763b..db2194a92e67 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -32,7 +32,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
s8 offset = 0;
u16 eep_val;
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
offset = eep_val & 0x7f;
if ((eep_val & 0xff) == 0xff)
@@ -42,7 +42,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
eep_val >>= 8;
if (eep_val == 0x00 || eep_val == 0xff) {
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
eep_val &= 0xff;
if (eep_val == 0x00 || eep_val == 0xff)
@@ -67,7 +67,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
/* init fce */
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
- eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
case 0:
mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 224609d6915f..1971a1b00038 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -50,9 +50,9 @@ static int mt76x2u_add_interface(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
- mt76x02_mac_setaddr(&dev->mt76, vif->addr);
+ mt76x02_mac_setaddr(dev, vif->addr);
- mt76x02_vif_init(&dev->mt76, vif, 0);
+ mt76x02_vif_init(dev, vif, 0);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
index 259ceae2a3a9..3f1e558e5e6d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -137,7 +137,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
/* vendor reset */
- mt76x02u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 10000);
/* enable FCE to send in-band cmd */
@@ -151,7 +151,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
/* FCE skip_fs_en */
mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
- err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
fw->size - sizeof(*hdr),
MCU_ROM_PATCH_MAX_PAYLOAD,
MT76U_MCU_ROM_PATCH_OFFSET);
@@ -210,7 +210,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
/* vendor reset */
- mt76x02u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 10000);
/* enable USB_DMA_CFG */
@@ -230,7 +230,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
/* load ILM */
- err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
ilm_len, MCU_FW_URB_MAX_PAYLOAD,
MT76U_MCU_ILM_OFFSET);
if (err < 0) {
@@ -241,8 +241,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
/* load DLM */
if (mt76xx_rev(dev) >= MT76XX_REV_E3)
dlm_offset += 0x800;
- err = mt76x02u_mcu_fw_send_data(&dev->mt76,
- fw->data + sizeof(*hdr) + ilm_len,
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len,
dlm_len, MCU_FW_URB_MAX_PAYLOAD,
dlm_offset);
if (err < 0) {
@@ -260,8 +259,8 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
/* enable FCE to send in-band cmd */
mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ mt76x02_set_ethtool_fwver(dev, hdr);
dev_dbg(dev->mt76.dev, "firmware running\n");
- mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
out:
release_firmware(fw);
@@ -283,10 +282,9 @@ int mt76x2u_mcu_init(struct mt76x02_dev *dev)
{
int err;
- err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT,
- 1, false);
+ err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
if (err < 0)
return err;
- return mt76x02_mcu_set_radio_state(&dev->mt76, true, false);
+ return mt76x02_mcu_set_radio_state(dev, true, false);
}
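
The USB MCU loader pushes the firmware image in two stages, ILM then DLM, each in chunks bounded by the URB payload limit, with the DLM destination shifted 0x800 higher on >= E3 silicon per the hunk above. A sketch of the chunking, where send_seg() stands in for mt76x02u_mcu_fw_send_data() and all lengths are illustrative:

#include <stddef.h>
#include <stdio.h>

static int send_seg(const unsigned char *data, size_t len,
		    size_t max_payload, unsigned int dst_offset)
{
	while (len) {
		size_t n = len < max_payload ? len : max_payload;

		printf("chunk %zu bytes -> 0x%x\n", n, dst_offset);
		data += n;
		dst_offset += n;
		len -= n;
	}
	return 0;
}

int main(void)
{
	unsigned char fw[8192] = { 0 };
	size_t hdr = 32, ilm_len = 4096, dlm_len = 8192 - 32 - 4096;
	unsigned int dlm_offset = 0x110800;	/* base + 0x800, illustrative */

	send_seg(fw + hdr, ilm_len, 2048, 0x0);			/* ILM */
	send_seg(fw + hdr + ilm_len, dlm_len, 2048, dlm_offset);	/* DLM */
	return 0;
}
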
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index b11f8a6a6254..ca96ba60510e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -29,12 +29,12 @@ void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev)
mt76x2u_mac_stop(dev);
if (is_5ghz)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false);
mt76x2u_mac_resume(dev);
}
@@ -69,7 +69,7 @@ mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
break;
}
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
mt76_rr(dev, MT_RX_STAT_1));
@@ -155,8 +155,8 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
mt76x2_configure_tx_delay(dev, chan->band, bw);
mt76x2_phy_set_txpower(dev);
- mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
- mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -177,18 +177,17 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
if (!dev->cal.init_cal_done) {
- u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R,
- 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
}
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false);
dev->cal.init_cal_done = true;
mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
@@ -203,7 +202,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
if (scan)
return 0;
- if (mt76x02_tssi_enabled(&dev->mt76)) {
+ if (mt76x2_tssi_enabled(dev)) {
/* init default values for temp compensation */
mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
0x38);
@@ -218,10 +217,9 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
chan = dev->mt76.chandef.chan;
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
- if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI,
- flag, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false);
dev->cal.tssi_cal_done = true;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index bf0e9e666bc4..7cbce03aa65b 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -96,7 +96,8 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (!ieee80211_is_data_qos(hdr->frame_control))
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ !ieee80211_is_data_present(hdr->frame_control))
return;
mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
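
Why the extra check matters: a QoS NULL frame passes the ieee80211_is_data_qos() test but carries no payload, so it must not advance the A-MPDU starting sequence number. A userspace sketch of the two predicates, using the frame-control bit values mac80211 defines:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define FCTL_FTYPE	0x000c
#define FTYPE_DATA	0x0008
#define STYPE_QOS_DATA	0x0080
#define STYPE_NODATA	0x0040	/* "null function" (no data) bit */

static bool is_data_qos(uint16_t fc)
{
	return (fc & (FCTL_FTYPE | STYPE_QOS_DATA)) ==
	       (FTYPE_DATA | STYPE_QOS_DATA);
}

static bool is_data_present(uint16_t fc)
{
	return (fc & (FCTL_FTYPE | STYPE_NODATA)) == FTYPE_DATA;
}

int main(void)
{
	uint16_t qos_data = 0x0088, qos_null = 0x00c8;

	printf("qos_data: qos=%d present=%d\n",
	       is_data_qos(qos_data), is_data_present(qos_data)); /* 1 1 */
	printf("qos_null: qos=%d present=%d\n",
	       is_data_qos(qos_null), is_data_present(qos_null)); /* 1 0 */
	return 0;
}
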
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 6b643ea701e3..5f0faf07c346 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -318,7 +318,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!buf->urb)
return -ENOMEM;
- buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
gfp);
if (!buf->urb->sg)
return -ENOMEM;
@@ -525,8 +525,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
@@ -755,8 +755,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
INIT_LIST_HEAD(&q->swq);
q->hw_idx = mt76_ac_to_hwq(i);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_TX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
@@ -862,6 +862,7 @@ int mt76u_init(struct mt76_dev *dev,
.copy = mt76u_copy,
.wr_rp = mt76u_wr_rp,
.rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
};
struct mt76_usb *usb = &dev->usb;
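
The devm_kzalloc(n * size) to devm_kcalloc(n, size) conversions in this file (and in pcie-cadence.c below) are about the multiplication: kcalloc refuses a request whose element count times size would overflow, instead of silently allocating a short buffer. The check reduces to:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

static void *checked_calloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would wrap */
	return calloc(n, size);
}

int main(void)
{
	/* huge count: the unchecked product would wrap to a tiny value */
	printf("%p\n", checked_calloc(SIZE_MAX / 2, 16));	/* (nil) */
	return 0;
}
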
diff --git a/drivers/net/wireless/quantenna/Kconfig b/drivers/net/wireless/quantenna/Kconfig
index de84ce125c26..7628d9c1ea6a 100644
--- a/drivers/net/wireless/quantenna/Kconfig
+++ b/drivers/net/wireless/quantenna/Kconfig
@@ -1,7 +1,7 @@
config WLAN_VENDOR_QUANTENNA
bool "Quantenna wireless cards support"
default y
- ---help---
+ help
If you have a wireless card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index 8d1492a90bd1..b8c12a5f16b4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -11,7 +11,7 @@ config QTNFMAC_PEARL_PCIE
select QTNFMAC
select FW_LOADER
select CRC32
- ---help---
+ help
This option adds support for wireless adapters based on Quantenna
802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 5aca12a51fe3..95c7b95c6f8a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
index f21e97ede090..634480fe6a64 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications */
#ifndef _QTN_FMAC_PCIE_IPC_H_
#define _QTN_FMAC_PCIE_IPC_H_
@@ -85,11 +72,6 @@
#define QTN_EP_LHOST_TQE_PORT 4
-enum qtnf_pcie_bda_ipc_flags {
- QTN_PCIE_IPC_FLAG_HBM_MAGIC = BIT(0),
- QTN_PCIE_IPC_FLAG_SHM_PIO = BIT(1),
-};
-
enum qtnf_fw_loadtype {
QTN_FW_DBEGIN,
QTN_FW_DSUB,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
index 0bfe285b6b48..6e9a5c61d46f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
@@ -1,28 +1,10 @@
-/*
- * Copyright (c) 2015 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications */
#ifndef __PEARL_PCIE_H
#define __PEARL_PCIE_H
-#define PCIE_GEN2_BASE (0xe9000000)
-#define PCIE_GEN3_BASE (0xe7000000)
-
-#define PEARL_CUR_PCIE_BASE (PCIE_GEN2_BASE)
-#define PCIE_HDP_OFFSET (0x2000)
-
+/* Pearl PCIe HDP registers */
#define PCIE_HDP_CTRL(base) ((base) + 0x2c00)
#define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04)
#define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10)
@@ -86,7 +68,7 @@
#define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c)
#define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40)
-/* Host HBM pool registers */
+/* Pearl PCIe HBM pool registers */
#define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00)
#define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04)
#define PCIE_HHBM_Q_LIMIT_REG(base) ((base) + 0x2e08)
@@ -104,230 +86,13 @@
#define HBM_INT_STATUS(base) ((base) + 0x2f9c)
#define PCIE_HHBM_POOL_CNFIG(base) ((base) + 0x2f9c)
-/* host HBM bit field definition */
+/* Pearl PCIe HBM bit field definitions */
#define HHBM_CONFIG_SOFT_RESET (BIT(8))
#define HHBM_WR_REQ (BIT(0))
#define HHBM_RD_REQ (BIT(1))
#define HHBM_DONE (BIT(31))
#define HHBM_64BIT (BIT(10))
-/* offsets for dual PCIE */
-#define PCIE_PORT_LINK_CTL(base) ((base) + 0x0710)
-#define PCIE_GEN2_CTL(base) ((base) + 0x080C)
-#define PCIE_GEN3_OFF(base) ((base) + 0x0890)
-#define PCIE_ATU_CTRL1(base) ((base) + 0x0904)
-#define PCIE_ATU_CTRL2(base) ((base) + 0x0908)
-#define PCIE_ATU_BASE_LOW(base) ((base) + 0x090C)
-#define PCIE_ATU_BASE_HIGH(base) ((base) + 0x0910)
-#define PCIE_ATU_BASE_LIMIT(base) ((base) + 0x0914)
-#define PCIE_ATU_TGT_LOW(base) ((base) + 0x0918)
-#define PCIE_ATU_TGT_HIGH(base) ((base) + 0x091C)
-#define PCIE_DMA_WR_ENABLE(base) ((base) + 0x097C)
-#define PCIE_DMA_WR_CHWTLOW(base) ((base) + 0x0988)
-#define PCIE_DMA_WR_CHWTHIG(base) ((base) + 0x098C)
-#define PCIE_DMA_WR_INTSTS(base) ((base) + 0x09BC)
-#define PCIE_DMA_WR_INTMASK(base) ((base) + 0x09C4)
-#define PCIE_DMA_WR_INTCLER(base) ((base) + 0x09C8)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base) ((base) + 0x09D0)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base) ((base) + 0x09D4)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base) ((base) + 0x09D8)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base) ((base) + 0x09DC)
-#define PCIE_DMA_WR_IMWR_DATA(base) ((base) + 0x09E0)
-#define PCIE_DMA_WR_LL_ERR_EN(base) ((base) + 0x0A00)
-#define PCIE_DMA_WR_DOORBELL(base) ((base) + 0x0980)
-#define PCIE_DMA_RD_ENABLE(base) ((base) + 0x099C)
-#define PCIE_DMA_RD_DOORBELL(base) ((base) + 0x09A0)
-#define PCIE_DMA_RD_CHWTLOW(base) ((base) + 0x09A8)
-#define PCIE_DMA_RD_CHWTHIG(base) ((base) + 0x09AC)
-#define PCIE_DMA_RD_INTSTS(base) ((base) + 0x0A10)
-#define PCIE_DMA_RD_INTMASK(base) ((base) + 0x0A18)
-#define PCIE_DMA_RD_INTCLER(base) ((base) + 0x0A1C)
-#define PCIE_DMA_RD_ERR_STS_L(base) ((base) + 0x0A24)
-#define PCIE_DMA_RD_ERR_STS_H(base) ((base) + 0x0A28)
-#define PCIE_DMA_RD_LL_ERR_EN(base) ((base) + 0x0A34)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base) ((base) + 0x0A3C)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base) ((base) + 0x0A40)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base) ((base) + 0x0A44)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base) ((base) + 0x0A48)
-#define PCIE_DMA_RD_IMWR_DATA(base) ((base) + 0x0A4C)
-#define PCIE_DMA_CHNL_CONTEXT(base) ((base) + 0x0A6C)
-#define PCIE_DMA_CHNL_CNTRL(base) ((base) + 0x0A70)
-#define PCIE_DMA_XFR_SIZE(base) ((base) + 0x0A78)
-#define PCIE_DMA_SAR_LOW(base) ((base) + 0x0A7C)
-#define PCIE_DMA_SAR_HIGH(base) ((base) + 0x0A80)
-#define PCIE_DMA_DAR_LOW(base) ((base) + 0x0A84)
-#define PCIE_DMA_DAR_HIGH(base) ((base) + 0x0A88)
-#define PCIE_DMA_LLPTR_LOW(base) ((base) + 0x0A8C)
-#define PCIE_DMA_LLPTR_HIGH(base) ((base) + 0x0A90)
-#define PCIE_DMA_WRLL_ERR_ENB(base) ((base) + 0x0A00)
-#define PCIE_DMA_RDLL_ERR_ENB(base) ((base) + 0x0A34)
-#define PCIE_DMABD_CHNL_CNTRL(base) ((base) + 0x8000)
-#define PCIE_DMABD_XFR_SIZE(base) ((base) + 0x8004)
-#define PCIE_DMABD_SAR_LOW(base) ((base) + 0x8008)
-#define PCIE_DMABD_SAR_HIGH(base) ((base) + 0x800c)
-#define PCIE_DMABD_DAR_LOW(base) ((base) + 0x8010)
-#define PCIE_DMABD_DAR_HIGH(base) ((base) + 0x8014)
-#define PCIE_DMABD_LLPTR_LOW(base) ((base) + 0x8018)
-#define PCIE_DMABD_LLPTR_HIGH(base) ((base) + 0x801c)
-#define PCIE_WRDMA0_CHNL_CNTRL(base) ((base) + 0x8000)
-#define PCIE_WRDMA0_XFR_SIZE(base) ((base) + 0x8004)
-#define PCIE_WRDMA0_SAR_LOW(base) ((base) + 0x8008)
-#define PCIE_WRDMA0_SAR_HIGH(base) ((base) + 0x800c)
-#define PCIE_WRDMA0_DAR_LOW(base) ((base) + 0x8010)
-#define PCIE_WRDMA0_DAR_HIGH(base) ((base) + 0x8014)
-#define PCIE_WRDMA0_LLPTR_LOW(base) ((base) + 0x8018)
-#define PCIE_WRDMA0_LLPTR_HIGH(base) ((base) + 0x801c)
-#define PCIE_WRDMA1_CHNL_CNTRL(base) ((base) + 0x8020)
-#define PCIE_WRDMA1_XFR_SIZE(base) ((base) + 0x8024)
-#define PCIE_WRDMA1_SAR_LOW(base) ((base) + 0x8028)
-#define PCIE_WRDMA1_SAR_HIGH(base) ((base) + 0x802c)
-#define PCIE_WRDMA1_DAR_LOW(base) ((base) + 0x8030)
-#define PCIE_WRDMA1_DAR_HIGH(base) ((base) + 0x8034)
-#define PCIE_WRDMA1_LLPTR_LOW(base) ((base) + 0x8038)
-#define PCIE_WRDMA1_LLPTR_HIGH(base) ((base) + 0x803c)
-#define PCIE_RDDMA0_CHNL_CNTRL(base) ((base) + 0x8040)
-#define PCIE_RDDMA0_XFR_SIZE(base) ((base) + 0x8044)
-#define PCIE_RDDMA0_SAR_LOW(base) ((base) + 0x8048)
-#define PCIE_RDDMA0_SAR_HIGH(base) ((base) + 0x804c)
-#define PCIE_RDDMA0_DAR_LOW(base) ((base) + 0x8050)
-#define PCIE_RDDMA0_DAR_HIGH(base) ((base) + 0x8054)
-#define PCIE_RDDMA0_LLPTR_LOW(base) ((base) + 0x8058)
-#define PCIE_RDDMA0_LLPTR_HIGH(base) ((base) + 0x805c)
-#define PCIE_RDDMA1_CHNL_CNTRL(base) ((base) + 0x8060)
-#define PCIE_RDDMA1_XFR_SIZE(base) ((base) + 0x8064)
-#define PCIE_RDDMA1_SAR_LOW(base) ((base) + 0x8068)
-#define PCIE_RDDMA1_SAR_HIGH(base) ((base) + 0x806c)
-#define PCIE_RDDMA1_DAR_LOW(base) ((base) + 0x8070)
-#define PCIE_RDDMA1_DAR_HIGH(base) ((base) + 0x8074)
-#define PCIE_RDDMA1_LLPTR_LOW(base) ((base) + 0x8078)
-#define PCIE_RDDMA1_LLPTR_HIGH(base) ((base) + 0x807c)
-
-#define PCIE_ID(base) ((base) + 0x0000)
-#define PCIE_CMD(base) ((base) + 0x0004)
-#define PCIE_BAR(base, n) ((base) + 0x0010 + ((n) << 2))
-#define PCIE_CAP_PTR(base) ((base) + 0x0034)
-#define PCIE_MSI_LBAR(base) ((base) + 0x0054)
-#define PCIE_MSI_CTRL(base) ((base) + 0x0050)
-#define PCIE_MSI_ADDR_L(base) ((base) + 0x0054)
-#define PCIE_MSI_ADDR_H(base) ((base) + 0x0058)
-#define PCIE_MSI_DATA(base) ((base) + 0x005C)
-#define PCIE_MSI_MASK_BIT(base) ((base) + 0x0060)
-#define PCIE_MSI_PEND_BIT(base) ((base) + 0x0064)
-#define PCIE_DEVCAP(base) ((base) + 0x0074)
-#define PCIE_DEVCTLSTS(base) ((base) + 0x0078)
-
-#define PCIE_CMDSTS(base) ((base) + 0x0004)
-#define PCIE_LINK_STAT(base) ((base) + 0x80)
-#define PCIE_LINK_CTL2(base) ((base) + 0xa0)
-#define PCIE_ASPM_L1_CTRL(base) ((base) + 0x70c)
-#define PCIE_ASPM_LINK_CTRL(base) (PCIE_LINK_STAT)
-#define PCIE_ASPM_L1_SUBSTATE_TIMING(base) ((base) + 0xB44)
-#define PCIE_L1SUB_CTRL1(base) ((base) + 0x150)
-#define PCIE_PMCSR(base) ((base) + 0x44)
-#define PCIE_CFG_SPACE_LIMIT(base) ((base) + 0x100)
-
-/* PCIe link defines */
-#define PEARL_PCIE_LINKUP (0x7)
-#define PEARL_PCIE_DATA_LINK (BIT(0))
-#define PEARL_PCIE_PHY_LINK (BIT(1))
-#define PEARL_PCIE_LINK_RST (BIT(3))
-#define PEARL_PCIE_FATAL_ERR (BIT(5))
-#define PEARL_PCIE_NONFATAL_ERR (BIT(6))
-
-/* PCIe Lane defines */
-#define PCIE_G2_LANE_X1 ((BIT(0)) << 16)
-#define PCIE_G2_LANE_X2 ((BIT(0) | BIT(1)) << 16)
-
-/* PCIe DLL link enable */
-#define PCIE_DLL_LINK_EN ((BIT(0)) << 5)
-
-#define PCIE_LINK_GEN1 (BIT(0))
-#define PCIE_LINK_GEN2 (BIT(1))
-#define PCIE_LINK_GEN3 (BIT(2))
-#define PCIE_LINK_MODE(x) (((x) >> 16) & 0x7)
-
-#define MSI_EN (BIT(0))
-#define MSI_64_EN (BIT(7))
-#define PCIE_MSI_ADDR_OFFSET(a) ((a) & 0xFFFF)
-#define PCIE_MSI_ADDR_ALIGN(a) ((a) & (~0xFFFF))
-
-#define PCIE_BAR_MASK(base, n) ((base) + 0x1010 + ((n) << 2))
-#define PCIE_MAX_BAR (6)
-
-#define PCIE_ATU_VIEW(base) ((base) + 0x0900)
-#define PCIE_ATU_CTL1(base) ((base) + 0x0904)
-#define PCIE_ATU_CTL2(base) ((base) + 0x0908)
-#define PCIE_ATU_LBAR(base) ((base) + 0x090c)
-#define PCIE_ATU_UBAR(base) ((base) + 0x0910)
-#define PCIE_ATU_LAR(base) ((base) + 0x0914)
-#define PCIE_ATU_LTAR(base) ((base) + 0x0918)
-#define PCIE_ATU_UTAR(base) ((base) + 0x091c)
-
-#define PCIE_MSI_ADDR_LOWER(base) ((base) + 0x0820)
-#define PCIE_MSI_ADDR_UPPER(base) ((base) + 0x0824)
-#define PCIE_MSI_ENABLE(base) ((base) + 0x0828)
-#define PCIE_MSI_MASK_RC(base) ((base) + 0x082c)
-#define PCIE_MSI_STATUS(base) ((base) + 0x0830)
-#define PEARL_PCIE_MSI_REGION (0xce000000)
-#define PEARL_PCIE_MSI_DATA (0)
-#define PCIE_MSI_GPIO(base) ((base) + 0x0888)
-
-#define PCIE_HDP_HOST_QUEUE_FULL (BIT(17))
-#define USE_BAR_MATCH_MODE
-#define PCIE_ATU_OB_REGION (BIT(0))
-#define PCIE_ATU_EN_REGION (BIT(31))
-#define PCIE_ATU_EN_MATCH (BIT(30))
-#define PCIE_BASE_REGION (0xb0000000)
-#define PCIE_MEM_MAP_SIZE (512 * 1024)
-
-#define PCIE_OB_REG_REGION (0xcf000000)
-#define PCIE_CONFIG_REGION (0xcf000000)
-#define PCIE_CONFIG_SIZE (4096)
-#define PCIE_CONFIG_CH (1)
-
-/* inbound mapping */
-#define PCIE_IB_BAR0 (0x00000000) /* ddr */
-#define PCIE_IB_BAR0_CH (0)
-#define PCIE_IB_BAR3 (0xe0000000) /* sys_reg */
-#define PCIE_IB_BAR3_CH (1)
-
-/* outbound mapping */
-#define PCIE_MEM_CH (0)
-#define PCIE_REG_CH (1)
-#define PCIE_MEM_REGION (0xc0000000)
-#define PCIE_MEM_SIZE (0x000fffff)
-#define PCIE_MEM_TAR (0x80000000)
-
-#define PCIE_MSI_REGION (0xce000000)
-#define PCIE_MSI_SIZE (KBYTE(4) - 1)
-#define PCIE_MSI_CH (1)
-
-/* size of config region */
-#define PCIE_CFG_SIZE (0x0000ffff)
-
-#define PCIE_ATU_DIR_IB (BIT(31))
-#define PCIE_ATU_DIR_OB (0)
-#define PCIE_ATU_DIR_CFG (2)
-#define PCIE_ATU_DIR_MATCH_IB (BIT(31) | BIT(30))
-
-#define PCIE_DMA_WR_0 (0)
-#define PCIE_DMA_WR_1 (1)
-#define PCIE_DMA_RD_0 (2)
-#define PCIE_DMA_RD_1 (3)
-
-#define PCIE_DMA_CHNL_CNTRL_CB (BIT(0))
-#define PCIE_DMA_CHNL_CNTRL_TCB (BIT(1))
-#define PCIE_DMA_CHNL_CNTRL_LLP (BIT(2))
-#define PCIE_DMA_CHNL_CNTRL_LIE (BIT(3))
-#define PCIE_DMA_CHNL_CNTRL_RIE (BIT(4))
-#define PCIE_DMA_CHNL_CNTRL_CSS (BIT(8))
-#define PCIE_DMA_CHNL_CNTRL_LLE (BIT(9))
-#define PCIE_DMA_CHNL_CNTRL_TLP (BIT(26))
-
-#define PCIE_DMA_CHNL_CONTEXT_RD (BIT(31))
-#define PCIE_DMA_CHNL_CONTEXT_WR (0)
-#define PCIE_MAX_BAR (6)
-
/* PCIe HDP interrupt status definition */
#define PCIE_HDP_INT_EP_RXDMA (BIT(0))
#define PCIE_HDP_INT_HBM_UF (BIT(1))
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 73f6fc0d4a01..56040b181cf5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4918,11 +4918,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
- u16 seq_number;
u16 rate_flag = tx_info->control.rates[0].flags;
int tx_desc_size = priv->fops->tx_desc_size;
int ret;
- bool usedesc40, ampdu_enable, sgi = false, short_preamble = false;
+ bool ampdu_enable, sgi = false, short_preamble = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -4946,7 +4945,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (ieee80211_is_action(hdr->frame_control))
rtl8xxxu_dump_action(dev, hdr);
- usedesc40 = (tx_desc_size == 40);
tx_info->rate_driver_data[0] = hw;
if (control && control->sta)
@@ -5013,7 +5011,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
else
rts_rate = 0;
- seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 317c1b3101da..ba258318ee9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3404,75 +3404,6 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
"%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
-static u8 _rtl8821ae_mrate_idx_to_arfr_id(
- struct ieee80211_hw *hw, u8 rate_index,
- enum wireless_mode wirelessmode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 ret = 0;
- switch (rate_index) {
- case RATR_INX_WIRELESS_NGB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 1;
- else
- ret = 0;
- ; break;
- case RATR_INX_WIRELESS_N:
- case RATR_INX_WIRELESS_NG:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 5;
- else
- ret = 4;
- ; break;
- case RATR_INX_WIRELESS_NB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 3;
- else
- ret = 2;
- ; break;
- case RATR_INX_WIRELESS_GB:
- ret = 6;
- break;
- case RATR_INX_WIRELESS_G:
- ret = 7;
- break;
- case RATR_INX_WIRELESS_B:
- ret = 8;
- break;
- case RATR_INX_WIRELESS_MC:
- if ((wirelessmode == WIRELESS_MODE_B)
- || (wirelessmode == WIRELESS_MODE_G)
- || (wirelessmode == WIRELESS_MODE_N_24G)
- || (wirelessmode == WIRELESS_MODE_AC_24G))
- ret = 6;
- else
- ret = 7;
- case RATR_INX_WIRELESS_AC_5N:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 10;
- else
- ret = 9;
- break;
- case RATR_INX_WIRELESS_AC_24N:
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
- if (rtlphy->rf_type == RF_1T1R)
- ret = 10;
- else
- ret = 9;
- } else {
- if (rtlphy->rf_type == RF_1T1R)
- ret = 11;
- else
- ret = 12;
- }
- break;
- default:
- ret = 0; break;
- }
- return ret;
-}
-
static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate)
{
u8 i, j, tmp_rate;
@@ -3761,7 +3692,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
break;
}
- ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
+ ratr_index = rtl_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
sta_entry->ratr_index = ratr_index;
ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode,
ratr_bitmap);
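
Besides the stray "; break" statements, the per-driver mapper deleted above had a latent fall-through: the RATR_INX_WIRELESS_MC case sets ret and then runs straight into the AC_5N case, overwriting it. Switching to the shared rtl_mrate_idx_to_arfr_id() removes the duplicate along with the bug. The hazard in miniature:

#include <stdio.h>

static int buggy(int idx)
{
	int ret = 0;

	switch (idx) {
	case 1:
		ret = 6;	/* missing break: falls through */
	case 2:
		ret = 9;
		break;
	}
	return ret;
}

int main(void)
{
	printf("%d\n", buggy(1));	/* 9, not the intended 6 */
	return 0;
}
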
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 722537e14848..41b49716ac75 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) {
pr_err("missing testcase data\n");
@@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) {
pr_err("missing testcase data\n");
@@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void)
pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq == -EPROBE_DEFER,
+ "device deferred probe failed - %d\n", irq);
- /* Test that a parsing failure does not return -EPROBE_DEFER */
- np = of_find_node_by_path("/testcase-data/testcase-device2");
- pdev = of_find_device_by_node(np);
- unittest(pdev, "device 2 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ unittest(pdev, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq < 0 && irq != -EPROBE_DEFER,
+ "device parsing error failed - %d\n", irq);
+ }
np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n");
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
index 86f1b002c846..975bcdd6b5c0 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/pcie-cadence.c
@@ -180,11 +180,11 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
return 0;
}
- phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+ phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+ link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
diff --git a/drivers/phy/mscc/phy-ocelot-serdes.c b/drivers/phy/mscc/phy-ocelot-serdes.c
index 8936abd22f0f..b2be54680cf7 100644
--- a/drivers/phy/mscc/phy-ocelot-serdes.c
+++ b/drivers/phy/mscc/phy-ocelot-serdes.c
@@ -257,8 +257,8 @@ static int serdes_probe(struct platform_device *pdev)
ctrl->dev = &pdev->dev;
ctrl->regs = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (!ctrl->regs)
- return -ENODEV;
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
for (i = 0; i <= SERDES_MAX; i++) {
ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]);
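
syscon_node_to_regmap() reports failure as an ERR_PTR-encoded pointer, never NULL, so the old NULL test could pass a poisoned pointer along and would never fail the probe. The idiom, sketched in userspace with the kernel's MAX_ERRNO:

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *regs = ERR_PTR(-19);	/* -ENODEV */

	if (IS_ERR(regs))		/* a NULL check would miss this */
		printf("probe fails with %ld\n", PTR_ERR(regs));
	return 0;
}
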
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 4a8a8efadefa..cf73a403d22d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -636,6 +636,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err;
}
+ return 0;
+}
+
+static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
+{
+ struct gpio_chip *chip = &mcp->chip;
+ int err;
+
err = gpiochip_irqchip_add_nested(chip,
&mcp23s08_irq_chip,
0,
@@ -912,7 +920,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
}
if (mcp->irq && mcp->irq_controller) {
- ret = mcp23s08_irq_setup(mcp);
+ ret = mcp23s08_irqchip_setup(mcp);
if (ret)
goto fail;
}
@@ -944,6 +952,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
+ if (mcp->irq)
+ ret = mcp23s08_irq_setup(mcp);
+
fail:
if (ret < 0)
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 398393ab5df8..b6fd4838f60f 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret > 0) {
ec_dev->event_size = ret - 1;
- memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size);
+ memcpy(&ec_dev->event_data, msg->data, ret);
}
return ret;
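
The fix above is an off-by-one in the opposite direction from what it looks like: the transfer length (ret) covers a one-byte event type plus the payload, and event_size deliberately excludes the type byte, so copying event_size bytes into event_data dropped the final payload byte. A layout sketch with illustrative names and sizes:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct event_data {
	uint8_t type;
	uint8_t payload[16];
};

int main(void)
{
	uint8_t msg[5] = { 0x30, 0xde, 0xad, 0xbe, 0xef };
	int ret = sizeof(msg);		/* bytes returned by the EC */
	int event_size = ret - 1;	/* payload length only */
	struct event_data ev;

	memcpy(&ev, msg, ret);		/* was: ret - 1, losing 0xef */
	printf("type=%#x size=%d last=%#x\n",
	       ev.type, event_size, ev.payload[event_size - 1]);
	return 0;
}
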
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index eceba3858cef..2f61f5579aa5 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -210,11 +210,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode).
*/
-void __sclp_early_printk(const char *str, unsigned int len)
+void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
{
int have_linemode, have_vt220;
- if (sclp_init_state != sclp_init_state_uninitialized)
+ if (!force && sclp_init_state != sclp_init_state_uninitialized)
return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return;
@@ -227,5 +227,10 @@ void __sclp_early_printk(const char *str, unsigned int len)
void sclp_early_printk(const char *str)
{
- __sclp_early_printk(str, strlen(str));
+ __sclp_early_printk(str, strlen(str), 0);
+}
+
+void sclp_early_printk_force(const char *str)
+{
+ __sclp_early_printk(str, strlen(str), 1);
}
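
The new force flag lets panic-time output bypass the "only before the regular SCLP driver takes over" guard, which is what sclp_early_printk_force() is for. The shape of the guard, in a trivial sketch:

#include <stdio.h>

enum { UNINITIALIZED, INITIALIZED } init_state = INITIALIZED;

static void early_printk(const char *s, unsigned int len, int force)
{
	if (!force && init_state != UNINITIALIZED)
		return;			/* normal path: stay silent */
	fwrite(s, 1, len, stdout);
}

int main(void)
{
	early_printk("lost\n", 5, 0);		/* suppressed */
	early_printk("panic: forced\n", 14, 1);	/* printed anyway */
	return 0;
}
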
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index dbe7c7ac9ac8..fd77e46eb3b2 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
for (i = 0; i < pat->pat_nr; i++, pa++)
for (j = 0; j < pa->pa_nr; j++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ if (pa->pa_iova_pfn[j] == iova_pfn)
return true;
return false;
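
The one-character fix above: the inner loop indexed pa_iova_pfn with the outer counter i, so only a single element per array was ever compared against the pinned iova. The bug class in miniature:

#include <stdbool.h>
#include <stdio.h>

static bool contains(int rows[2][3], int needle)
{
	for (int i = 0; i < 2; i++)
		for (int j = 0; j < 3; j++)
			if (rows[i][j] == needle)	/* was rows[i][i] */
				return true;
	return false;
}

int main(void)
{
	int rows[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };

	printf("%d\n", contains(rows, 6));	/* 1; the buggy form gives 0 */
	return 0;
}
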
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 770fa9cfc310..f47d16b5810b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -22,6 +22,7 @@
#include "vfio_ccw_private.h"
struct workqueue_struct *vfio_ccw_work_q;
+struct kmem_cache *vfio_ccw_io_region;
/*
* Helpers
@@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
cp_update_scsw(&private->cp, &irb->scsw);
cp_free(&private->cp);
}
- memcpy(private->io_region.irb_area, irb, sizeof(*irb));
+ memcpy(private->io_region->irb_area, irb, sizeof(*irb));
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
@@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private)
return -ENOMEM;
+
+ private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->io_region) {
+ kfree(private);
+ return -ENOMEM;
+ }
+
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
@@ -139,6 +148,7 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return ret;
}
@@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return 0;
@@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_work_q)
return -ENOMEM;
+ vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+ sizeof(struct ccw_io_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_io_region), NULL);
+ if (!vfio_ccw_io_region) {
+ destroy_workqueue(vfio_ccw_work_q);
+ return -ENOMEM;
+ }
+
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
+ kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
}
@@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void)
{
css_driver_unregister(&vfio_ccw_sch_driver);
isc_unregister(VFIO_CCW_ISC);
+ kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
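
Moving io_region out of the private struct and into its own slab created with kmem_cache_create_usercopy() plays with hardened usercopy: a copy to or from user space must fall inside the region the cache has whitelisted (useroffset/usersize), and a dedicated cache whitelists exactly the ccw_io_region. A sketch of what the whitelist check amounts to, with an illustrative object size:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cache { size_t useroffset, usersize; };

static bool usercopy_ok(const struct cache *c, size_t offset, size_t n)
{
	if (offset < c->useroffset || offset - c->useroffset > c->usersize)
		return false;
	return n <= c->usersize - (offset - c->useroffset);
}

int main(void)
{
	struct cache io_region_cache = { 0, 164 };	/* whole object */

	printf("%d\n", usercopy_ok(&io_region_cache, 0, 164));	/* 1 */
	printf("%d\n", usercopy_ok(&io_region_cache, 160, 8));	/* 0 */
	return 0;
}
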
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 797a82731159..f94aa01f9c36 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
- private->io_region.ret_code = -EIO;
+ private->io_region->ret_code = -EIO;
}
static void fsm_io_busy(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
- private->io_region.ret_code = -EBUSY;
+ private->io_region->ret_code = -EBUSY;
}
static void fsm_disabled_irq(struct vfio_ccw_private *private,
@@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
{
union orb *orb;
union scsw *scsw = &private->scsw;
- struct ccw_io_region *io_region = &private->io_region;
+ struct ccw_io_region *io_region = private->io_region;
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 41eeb57d68a3..f673e106c041 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- region = &private->io_region;
+ region = private->io_region;
if (copy_to_user(buf, (void *)region + *ppos, count))
return -EFAULT;
@@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
if (private->state != VFIO_CCW_STATE_IDLE)
return -EACCES;
- region = &private->io_region;
+ region = private->io_region;
if (copy_from_user((void *)region + *ppos, buf, count))
return -EFAULT;
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 78a66d96756b..078e46f9623d 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -41,7 +41,7 @@ struct vfio_ccw_private {
atomic_t avail;
struct mdev_device *mdev;
struct notifier_block nb;
- struct ccw_io_region io_region;
+ struct ccw_io_region *io_region;
struct channel_program cp;
struct irb irb;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index cd44ff2df6fe..6843bc7ee9f2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -390,8 +390,9 @@ enum qeth_layer2_frame_flags {
enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
- QETH_HEADER_TYPE_TSO = 0x03,
+ QETH_HEADER_TYPE_L3_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
+ QETH_HEADER_TYPE_L2_TSO = 0x06,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
@@ -1047,6 +1048,8 @@ int qeth_vm_request_mac(struct qeth_card *card);
int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr **hdr, unsigned int hdr_len,
unsigned int proto_len, unsigned int *elements);
+void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
+ struct sk_buff *skb, unsigned int proto_len);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 1771d0073c0c..3274f13aad57 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4088,15 +4088,31 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
+void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
+ struct sk_buff *skb, unsigned int proto_len)
+{
+ struct qeth_hdr_ext_tso *ext = &hdr->ext;
+
+ ext->hdr_tot_len = sizeof(*ext);
+ ext->imb_hdr_no = 1;
+ ext->hdr_type = 1;
+ ext->hdr_version = 1;
+ ext->hdr_len = 28;
+ ext->payload_len = payload_len;
+ ext->mss = skb_shinfo(skb)->gso_size;
+ ext->dg_hdr_len = proto_len;
+}
+EXPORT_SYMBOL_GPL(qeth_fill_tso_ext);
+
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len))
{
- const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
- const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+ unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
+ bool is_tso = skb_is_gso(skb);
unsigned int data_offset = 0;
struct qeth_hdr *hdr = NULL;
unsigned int hd_len = 0;
@@ -4104,6 +4120,14 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
int push_len, rc;
bool is_sg;
+ if (is_tso) {
+ hw_hdr_len = sizeof(struct qeth_hdr_tso);
+ proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ } else {
+ hw_hdr_len = sizeof(struct qeth_hdr);
+ proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+ }
+
rc = skb_cow_head(skb, hw_hdr_len);
if (rc)
return rc;
@@ -4112,13 +4136,16 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
&elements);
if (push_len < 0)
return push_len;
- if (!push_len) {
+ if (is_tso || !push_len) {
/* HW header needs its own buffer element. */
hd_len = hw_hdr_len + proto_len;
- data_offset = proto_len;
+ data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+ if (is_tso)
+ qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
+ frame_len - proto_len, skb, proto_len);
is_sg = skb_is_nonlinear(skb);
if (IS_IQD(card)) {
@@ -4136,6 +4163,10 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
card->perf_stats.buf_elements_sent += elements;
if (is_sg)
card->perf_stats.sg_skbs_sent++;
+ if (is_tso) {
+ card->perf_stats.large_send_bytes += frame_len;
+ card->perf_stats.large_send_cnt++;
+ }
}
} else {
if (!push_len)
@@ -5394,6 +5425,21 @@ static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
return cmd->hdr.return_code;
}
+static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_ipa_caps *caps = reply->param;
+
+ if (qeth_setassparms_inspect_rc(cmd))
+ return 0;
+
+ caps->supported = cmd->data.setassparms.data.caps.supported;
+ caps->enabled = cmd->data.setassparms.data.caps.enabled;
+ return 0;
+}
+
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -6396,27 +6442,85 @@ static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
return rc ? -EIO : 0;
}
-static int qeth_set_ipa_tso(struct qeth_card *card, int on)
+static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_tso_start_data *tso_data = reply->param;
+
+ if (qeth_setassparms_inspect_rc(cmd))
+ return 0;
+
+ tso_data->mss = cmd->data.setassparms.data.tso.mss;
+ tso_data->supported = cmd->data.setassparms.data.tso.supported;
+ return 0;
+}
+
+static int qeth_set_tso_off(struct qeth_card *card,
+ enum qeth_prot_versions prot)
{
+ return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_STOP, 0, prot);
+}
+
+static int qeth_set_tso_on(struct qeth_card *card,
+ enum qeth_prot_versions prot)
+{
+ struct qeth_tso_start_data tso_data;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_caps caps;
int rc;
- QETH_CARD_TEXT(card, 3, "sttso");
+ iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_START, 0, prot);
+ if (!iob)
+ return -ENOMEM;
- if (on) {
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_START, 0);
- if (rc) {
- dev_warn(&card->gdev->dev,
- "Starting outbound TCP segmentation offload for %s failed\n",
- QETH_CARD_IFNAME(card));
- return -EIO;
- }
- dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
- } else {
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_STOP, 0);
+ rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */,
+ qeth_start_tso_cb, &tso_data);
+ if (rc)
+ return rc;
+
+ if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
+ qeth_set_tso_off(card, prot);
+ return -EOPNOTSUPP;
}
- return rc;
+
+ iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
+ if (!iob) {
+ qeth_set_tso_off(card, prot);
+ return -ENOMEM;
+ }
+
+ /* enable TSO capability */
+ caps.supported = 0;
+ caps.enabled = QETH_IPA_LARGE_SEND_TCP;
+ rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps,
+ qeth_setassparms_get_caps_cb, &caps);
+ if (rc) {
+ qeth_set_tso_off(card, prot);
+ return rc;
+ }
+
+ if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
+ !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
+ qeth_set_tso_off(card, prot);
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
+ tso_data.mss);
+ return 0;
+}
+
+static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
+ enum qeth_prot_versions prot)
+{
+ int rc = on ? qeth_set_tso_on(card, prot) :
+ qeth_set_tso_off(card, prot);
+
+ return rc ? -EIO : 0;
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
@@ -6443,7 +6547,7 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
}
#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
- NETIF_F_IPV6_CSUM)
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
/**
* qeth_enable_hw_features() - (Re-)Enable HW functions for device features
* @dev: a net_device
@@ -6493,11 +6597,18 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
if (rc)
changed ^= NETIF_F_RXCSUM;
}
- if ((changed & NETIF_F_TSO)) {
- rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
+ if (changed & NETIF_F_TSO) {
+ rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
+ QETH_PROT_IPV4);
if (rc)
changed ^= NETIF_F_TSO;
}
+ if (changed & NETIF_F_TSO6) {
+ rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
+ QETH_PROT_IPV6);
+ if (rc)
+ changed ^= NETIF_F_TSO6;
+ }
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
@@ -6523,6 +6634,8 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
features &= ~NETIF_F_RXCSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
+ if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
+ features &= ~NETIF_F_TSO6;
/* if the card isn't up, remove features that require hw changes */
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)
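
The control flow added in qeth_set_tso_on() above is a three-step negotiation with rollback: START the assist and read the advertised MSS/capabilities, then ENABLE it and verify the echoed capability bits, stopping the assist again on any failure. A compact userspace sketch of just that ordering follows; the IPA command transport is stubbed out, and the types, stub replies and return values are invented for illustration:

#include <stdio.h>
#include <errno.h>

#define QETH_IPA_LARGE_SEND_TCP 0x00000001

struct tso_start_data { unsigned int mss, supported; };
struct ipa_caps { unsigned int supported, enabled; };

/* Stubs for the IPA command round-trips; the replies are invented. */
static int send_start(struct tso_start_data *d)
{ d->mss = 1460; d->supported = QETH_IPA_LARGE_SEND_TCP; return 0; }
static int send_enable(struct ipa_caps *c)
{ c->supported = c->enabled = QETH_IPA_LARGE_SEND_TCP; return 0; }
static int send_stop(void) { return 0; }

static int set_tso_on(void)
{
	struct tso_start_data tso = { 0 };
	struct ipa_caps caps = { 0 };

	if (send_start(&tso))			/* IPA_CMD_ASS_START */
		return -EIO;
	if (!tso.mss || !(tso.supported & QETH_IPA_LARGE_SEND_TCP)) {
		send_stop();			/* roll back: no usable TSO */
		return -EOPNOTSUPP;
	}

	caps.enabled = QETH_IPA_LARGE_SEND_TCP;
	if (send_enable(&caps)) {		/* IPA_CMD_ASS_ENABLE */
		send_stop();
		return -EIO;
	}
	if ((caps.supported & QETH_IPA_LARGE_SEND_TCP) != QETH_IPA_LARGE_SEND_TCP ||
	    (caps.enabled & QETH_IPA_LARGE_SEND_TCP) != QETH_IPA_LARGE_SEND_TCP) {
		send_stop();			/* device did not accept it */
		return -EOPNOTSUPP;
	}

	printf("TSO enabled (MSS: %u)\n", tso.mss);
	return 0;
}

int main(void) { return set_tso_on() ? 1 : 0; }
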
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index aa5de1fe01e1..e85090467afe 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -56,6 +56,21 @@ static inline bool qeth_intparm_is_iob(unsigned long intparm)
#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
+struct qeth_ipa_caps {
+ u32 supported;
+ u32 enabled;
+};
+
+static inline bool qeth_ipa_caps_supported(struct qeth_ipa_caps *caps, u32 mask)
+{
+ return (caps->supported & mask) == mask;
+}
+
+static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
+{
+ return (caps->enabled & mask) == mask;
+}
+
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
@@ -405,14 +420,25 @@ struct qeth_checksum_cmd {
__u32 enabled;
} __packed;
+enum qeth_ipa_large_send_caps {
+ QETH_IPA_LARGE_SEND_TCP = 0x00000001,
+};
+
+struct qeth_tso_start_data {
+ u32 mss;
+ u32 supported;
+};
+
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
+ struct qeth_ipa_caps caps;
struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
+ struct qeth_tso_start_data tso;
__u8 ip[16];
} data;
} __attribute__ ((packed));
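
Note that the new qeth_ipa_caps helpers test that every bit in the mask is set, not just any bit, which matters once multi-bit capability masks are queried. A minimal standalone sketch of that semantics (types reduced for illustration):

#include <stdio.h>

struct qeth_ipa_caps { unsigned int supported, enabled; };

static int caps_supported(struct qeth_ipa_caps *caps, unsigned int mask)
{
	return (caps->supported & mask) == mask;
}

int main(void)
{
	struct qeth_ipa_caps caps = { .supported = 0x1, .enabled = 0x0 };

	printf("%d\n", caps_supported(&caps, 0x1));	/* 1: all bits present */
	printf("%d\n", caps_supported(&caps, 0x3));	/* 0: bit 0x2 missing  */
	return 0;
}
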
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c810d53fff51..23aaf373f631 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -197,15 +197,19 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len)
{
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
- hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
hdr->hdr.l2.pkt_length = data_len;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
+ if (skb_is_gso(skb)) {
+ hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
+ } else {
+ hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
}
/* set byte 3 to casting flags */
@@ -897,6 +901,20 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
+ if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+ card->dev->hw_features |= NETIF_F_TSO;
+ card->dev->vlan_features |= NETIF_F_TSO;
+ }
+ if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
+ card->dev->hw_features |= NETIF_F_TSO6;
+ card->dev->vlan_features |= NETIF_F_TSO6;
+ }
+
+ if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
+ netif_set_gso_max_size(card->dev,
+ PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
+ }
qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
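
The GSO cap set at the end of this L2 setup hunk reserves one QDIO buffer element for the qeth_hdr_tso header and leaves the rest for payload pages. Under common assumptions this works out as below; the 4 KiB page size and the 16-element buffer are assumptions for illustration, not values stated in the hunk:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned int elements = 16;	/* assumed QDIO_MAX_ELEMENTS_PER_BUFFER */

	/* one element reserved for the HW header */
	printf("gso_max_size = %u bytes\n", page_size * (elements - 1));
	return 0;
}
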
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 80893481bb85..0b161cc1fd2e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2037,7 +2037,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.length = data_len;
if (skb_is_gso(skb)) {
- hdr->hdr.l3.id = QETH_HEADER_TYPE_TSO;
+ hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2099,22 +2099,6 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_unlock();
}
-static void qeth_l3_fill_tso_ext(struct qeth_hdr_tso *hdr,
- unsigned int payload_len, struct sk_buff *skb,
- unsigned int proto_len)
-{
- struct qeth_hdr_ext_tso *ext = &hdr->ext;
-
- ext->hdr_tot_len = sizeof(*ext);
- ext->imb_hdr_no = 1;
- ext->hdr_type = 1;
- ext->hdr_version = 1;
- ext->hdr_len = 28;
- ext->payload_len = payload_len;
- ext->mss = skb_shinfo(skb)->gso_size;
- ext->dg_hdr_len = proto_len;
-}
-
static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
@@ -2175,9 +2159,9 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
} else {
qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
if (is_tso)
- qeth_l3_fill_tso_ext((struct qeth_hdr_tso *) hdr,
- frame_len - proto_len, skb,
- proto_len);
+ qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
+ frame_len - proto_len, skb,
+ proto_len);
}
is_sg = skb_is_nonlinear(skb);
@@ -2401,6 +2385,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ unsigned int headroom;
int rc;
if (card->dev->netdev_ops)
@@ -2415,11 +2400,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
}
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
- card->dev->needed_headroom = sizeof(struct qeth_hdr);
- /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
- card->dev->needed_headroom += VLAN_HLEN;
- if (qeth_is_supported(card, IPA_OUTBOUND_TSO))
- card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
/*IPv6 address autoconfiguration stuff*/
qeth_l3_get_unique_id(card);
@@ -2438,10 +2418,22 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_IPV6_CSUM;
card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
+ if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
+ card->dev->hw_features |= NETIF_F_TSO6;
+ card->dev->vlan_features |= NETIF_F_TSO6;
+ }
+
+ /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
+ if (card->dev->hw_features & NETIF_F_TSO6)
+ headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
+ else if (card->dev->hw_features & NETIF_F_TSO)
+ headroom = sizeof(struct qeth_hdr_tso);
+ else
+ headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
- card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
+ headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
@@ -2452,13 +2444,14 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
} else
return -ENODEV;
+ card->dev->needed_headroom = headroom;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
netif_keep_dst(card->dev);
- if (card->dev->hw_features & NETIF_F_TSO)
+ if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 7b31f19ade83..050879a2ddef 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = {
static int __init openprom_init(void)
{
- struct device_node *dp;
int err;
err = misc_register(&openprom_dev);
if (err)
return err;
- dp = of_find_node_by_path("/");
- dp = dp->child;
- while (dp) {
- if (!strcmp(dp->name, "options"))
- break;
- dp = dp->sibling;
- }
- options_node = dp;
-
+ options_node = of_get_child_by_name(of_find_node_by_path("/"), "options");
if (!options_node) {
misc_deregister(&openprom_dev);
return -EIO;
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index 524f9ea62e52..6516bc3cb58b 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f)
alloc_error:
kfree(ctx->ccb_buf);
done:
- if (ctx != NULL)
- kfree(ctx);
+ kfree(ctx);
return -ENOMEM;
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cc8e64dc65ad..e5bd035ebad0 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
/* start qedi context */
spin_lock_init(&qedi->hba_lock);
spin_lock_init(&qedi->task_idx_lock);
+ mutex_init(&qedi->stats_lock);
}
qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
qedi_ops->ll2->start(qedi->cdev, &params);
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 05c42235dd41..7c3cc968053c 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -120,6 +120,7 @@ static void bm_set_memory(u64 ba, u32 size)
*/
static dma_addr_t fbpr_a;
static size_t fbpr_sz;
+static int __bman_probed;
static int bman_fbpr(struct reserved_mem *rmem)
{
@@ -166,6 +167,12 @@ static irqreturn_t bman_isr(int irq, void *ptr)
return IRQ_HANDLED;
}
+int bman_is_probed(void)
+{
+ return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
static int fsl_bman_probe(struct platform_device *pdev)
{
int ret, err_irq;
@@ -175,6 +182,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
u16 id, bm_pool_cnt;
u8 major, minor;
+ __bman_probed = -1;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -255,6 +264,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
return ret;
}
+ __bman_probed = 1;
+
return 0;
};
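
bman_is_probed() deliberately returns a tri-state: 0 while the core probe has not run, 1 after success, and -1 on failure (__bman_probed is set to -1 on probe entry and to 1 only at the end). The qman_portal.c hunk later in this patch consumes the qman twin of this API; here is a standalone sketch of the same consumer pattern for bman, with errno constants spelled out for illustration:

#include <stdio.h>

#define EPROBE_DEFER 517
#define ENODEV 19

static int bman_is_probed(void);	/* 0, 1 or -1, as in the patch */

static int consumer_probe(void)
{
	int err = bman_is_probed();

	if (!err)
		return -EPROBE_DEFER;	/* core not probed yet, retry later */
	if (err < 0)
		return -ENODEV;		/* core probe failed for good */
	return 0;			/* safe to use BMan resources */
}

static int bman_is_probed(void) { return 1; }

int main(void) { return consumer_probe(); }
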
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 79cba58387a5..6fd5fef5f39b 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -273,6 +273,7 @@ static const struct qman_error_info_mdata error_mdata[] = {
static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
+static int __qman_probed;
static inline u32 qm_ccsr_in(u32 offset)
{
@@ -686,6 +687,12 @@ static int qman_resource_init(struct device *dev)
return 0;
}
+int qman_is_probed(void)
+{
+ return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
static int fsl_qman_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -695,6 +702,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
u16 id;
u8 major, minor;
+ __qman_probed = -1;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -828,6 +837,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
if (ret)
return ret;
+ __qman_probed = 1;
+
return 0;
}
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index a120002b630e..3e9391d117c5 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -227,6 +227,14 @@ static int qman_portal_probe(struct platform_device *pdev)
int irq, cpu, err;
u32 val;
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
return -ENOMEM;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index e1e264a9a4c7..28fc4ce75edb 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -738,14 +738,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
u8 link, depth;
u64 route;
- /*
- * After NVM upgrade adding root switch device fails because we
- * initiated reset. During that time ICM might still send
- * XDomain connected message which we ignore here.
- */
- if (!tb->root_switch)
- return;
-
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1037,14 +1029,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
if (pkg->hdr.packet_id)
return;
- /*
- * After NVM upgrade adding root switch device fails because we
- * initiated reset. During that time ICM might still send device
- * connected message which we ignore here.
- */
- if (!tb->root_switch)
- return;
-
route = get_route(pkg->route_hi, pkg->route_lo);
authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
@@ -1408,19 +1392,26 @@ static void icm_handle_notification(struct work_struct *work)
mutex_lock(&tb->lock);
- switch (n->pkg->code) {
- case ICM_EVENT_DEVICE_CONNECTED:
- icm->device_connected(tb, n->pkg);
- break;
- case ICM_EVENT_DEVICE_DISCONNECTED:
- icm->device_disconnected(tb, n->pkg);
- break;
- case ICM_EVENT_XDOMAIN_CONNECTED:
- icm->xdomain_connected(tb, n->pkg);
- break;
- case ICM_EVENT_XDOMAIN_DISCONNECTED:
- icm->xdomain_disconnected(tb, n->pkg);
- break;
+ /*
+ * When the domain is stopped, its workqueue is flushed, but before
+ * that happens the root switch is removed. In that case we should
+ * treat the queued events as being canceled.
+ */
+ if (tb->root_switch) {
+ switch (n->pkg->code) {
+ case ICM_EVENT_DEVICE_CONNECTED:
+ icm->device_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_DEVICE_DISCONNECTED:
+ icm->device_disconnected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_CONNECTED:
+ icm->xdomain_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_DISCONNECTED:
+ icm->xdomain_disconnected(tb, n->pkg);
+ break;
+ }
}
mutex_unlock(&tb->lock);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 88cff05a1808..5cd6bdfa068f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1191,5 +1191,5 @@ static void __exit nhi_unload(void)
tb_domain_exit();
}
-fs_initcall(nhi_init);
+rootfs_initcall(nhi_init);
module_exit(nhi_unload);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index fa8dcb470640..d31b975dd3fd 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev)
if (!data->skip_autocfg)
dw8250_setup_port(p);
-#ifdef CONFIG_PM
- uart.capabilities |= UART_CAP_RPM;
-#endif
-
/* If we have a valid fifosize, try hooking up DMA */
if (p->fifosize) {
data->dma.rxconf.src_maxburst = p->fifosize / 4;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 29ec34387246..1515074e18fb 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -868,8 +868,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
geni_se_select_mode(&port->se, port->xfer_mode);
if (!uart_console(uport)) {
- port->rx_fifo = devm_kzalloc(uport->dev,
- port->rx_fifo_depth * sizeof(u32), GFP_KERNEL);
+ port->rx_fifo = devm_kcalloc(uport->dev,
+ port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
if (!port->rx_fifo)
return -ENOMEM;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ac4424bf6b13..ab3f6e91853d 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -292,6 +292,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
},
/*
+ * The "SCIFA" that is in RZ/T and RZ/A2.
+ * It looks like a normal SCIF with FIFO data, but with a
+ * compressed address space. Also, the break-out of interrupts
+ * is different: ERI/BRI, RXI, TXI, TEI, DRI.
+ */
+ [SCIx_RZ_SCIFA_REGTYPE] = {
+ .regs = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 16 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0A, 8 },
+ [SCFCR] = { 0x0C, 16 },
+ [SCFDR] = { 0x0E, 16 },
+ [SCSPTR] = { 0x10, 16 },
+ [SCLSR] = { 0x12, 16 },
+ },
+ .fifosize = 16,
+ .overrun_reg = SCLSR,
+ .overrun_mask = SCLSR_ORER,
+ .sampling_rate_mask = SCI_SR(32),
+ .error_mask = SCIF_DEFAULT_ERROR_MASK,
+ .error_clear = SCIF_ERROR_CLEAR,
+ },
+
+ /*
* Common SH-3 SCIF definitions.
*/
[SCIx_SH3_SCIF_REGTYPE] = {
@@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
[SCIx_SH4_SCIF_REGTYPE] = {
.regs = {
[SCSMR] = { 0x00, 16 },
- [SCBRR] = { 0x02, 8 },
- [SCSCR] = { 0x04, 16 },
- [SCxTDR] = { 0x06, 8 },
- [SCxSR] = { 0x08, 16 },
- [SCxRDR] = { 0x0a, 8 },
- [SCFCR] = { 0x0c, 16 },
- [SCFDR] = { 0x0e, 16 },
- [SCSPTR] = { 0x10, 16 },
- [SCLSR] = { 0x12, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCSPTR] = { 0x20, 16 },
+ [SCLSR] = { 0x24, 16 },
},
.fifosize = 16,
.overrun_reg = SCLSR,
@@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev,
{
struct uart_port *port = &sci_port->port;
const struct resource *res;
- unsigned int i, regtype;
+ unsigned int i;
int ret;
sci_port->cfg = p;
@@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev,
if (unlikely(sci_port->params == NULL))
return -EINVAL;
- regtype = sci_port->params - sci_port_params;
switch (p->type) {
case PORT_SCIFB:
sci_port->rx_trigger = 48;
@@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev,
port->regshift = 1;
}
- if (regtype == SCIx_SH4_SCIF_REGTYPE)
- if (sci_port->reg_size >= 0x20)
- port->regshift = 1;
-
/*
* The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily
@@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = {
.compatible = "renesas,scif-r7s72100",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
},
+ {
+ .compatible = "renesas,scif-r7s9210",
+ .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+ },
/* Family-specific types */
{
.compatible = "renesas,rcar-gen1-scif",
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f9b40a9dc4d3..bc03b0a690b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1514,6 +1514,7 @@ static void acm_disconnect(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
struct tty_struct *tty;
+ int i;
/* sibling interface is already cleaning up */
if (!acm)
@@ -1544,6 +1545,11 @@ static void acm_disconnect(struct usb_interface *intf)
tty_unregister_device(acm_tty_driver, acm->minor);
+ usb_free_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_free_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 7334da9e9779..71d0d33c3286 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
xhci_mtk_host_enable(mtk);
xhci_dbg(xhci, "%s: restart port polling\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return 0;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 6372edf339d9..722860eb5a91 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -185,6 +185,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0215b70c4efc..e72ad9f81c73 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface is reserved */
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
+/* Interface must have two endpoints */
+#define NUMEP2 BIT(16)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
- .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1999,6 +2003,13 @@ static int option_probe(struct usb_serial *serial,
if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV;
+ /*
+ * Allow matching on bNumEndpoints for devices whose interface numbers
+ * can change (e.g. Quectel EP06).
+ */
+ if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
+ return -ENODEV;
+
/* Store the device flags so we can use them during attach. */
usb_set_serial_data(serial, (void *)device_flags);
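
Putting the two option.c checks together: a device_flags word for the EP06 both reserves interfaces 1-4 and requires exactly two endpoints. A standalone sketch of the probe decision, with the macros copied from the hunk and probe_ok() as an invented stand-in for the real probe path:

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
#define NUMEP2 BIT(16)

static int probe_ok(unsigned long flags, unsigned int ifnum,
		    unsigned int num_ep)
{
	if (flags & RSVD(ifnum))
		return 0;			/* reserved interface */
	if ((flags & NUMEP2) && num_ep != 2)
		return 0;			/* wrong endpoint count */
	return 1;
}

int main(void)
{
	unsigned long ep06 = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2;

	printf("if0, 2 endpoints: %d\n", probe_ok(ep06, 0, 2));	/* binds */
	printf("if0, 3 endpoints: %d\n", probe_ok(ep06, 0, 3));	/* skipped */
	return 0;
}
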
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 40864c2bd9dc..4d0273508043 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 8235b285dbb2..d09bab3bf224 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+extern const u8 aty_postdividers[8];
+
/*
* Hardware cursor support
@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a9a8272f7a6e..05111e90f168 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
/*
* PLL Reference Divider M:
*/
- M = pll_regs[2];
+ M = pll_regs[PLL_REF_DIV];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
- N = pll_regs[7 + (clock_cntl & 3)];
+ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/*
* PLL Divider Q:
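
The new post-divider lookup above replaces a power-of-two computation (1 << field), which cannot express the divider values 3, 5, 6 and 12, with a table lookup indexed by a 3-bit value: bits 0-1 come from the clock's field in VCLK_POST_DIV, bit 2 from PLL_EXT_CNTL. A standalone sketch of the bit assembly with invented register values (clock_cntl selecting VCLK1):

#include <stdio.h>

static const unsigned char aty_postdividers[8] = { 1, 2, 4, 8, 3, 5, 6, 12 };

int main(void)
{
	unsigned int vclk_post_div = 0x1b;	/* per-clock 2-bit fields */
	unsigned int pll_ext_cntl  = 0x20;	/* third index bit per clock */
	unsigned int clk = 1;			/* clock_cntl & 3 */

	unsigned int idx = ((vclk_post_div >> (clk << 1)) & 3) |
			   ((pll_ext_cntl >> (2 + clk)) & 4);

	printf("index %u -> post divider %u\n", idx, aty_postdividers[idx]);
	return 0;
}
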
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 74a62aa193c0..f87cc81f4fa2 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
*/
#define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{
@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8);
}
- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1;
@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
xpost_div += (q < 64*8);
xpost_div += (q < 32*8);
}
- pll->ct.xclk_post_div_real = postdividers[xpost_div];
+ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC
@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
mpost_div += (q < 64*8);
mpost_div += (q < 32*8);
}
- sclk_post_div_real = postdividers[mpost_div];
+ sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index f3d0bef16d78..6127f0fcd62c 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell)
*/
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
+ struct hlist_node **p;
+ struct afs_cell *pcell;
int ret;
if (!cell->anonymous_key) {
@@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
return ret;
mutex_lock(&net->proc_cells_lock);
- list_add_tail(&cell->proc_link, &net->proc_cells);
+ for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
+ pcell = hlist_entry(*p, struct afs_cell, proc_link);
+ if (strcmp(cell->name, pcell->name) < 0)
+ break;
+ }
+
+ cell->proc_link.pprev = p;
+ cell->proc_link.next = *p;
+ rcu_assign_pointer(*p, &cell->proc_link.next);
+ if (cell->proc_link.next)
+ cell->proc_link.next->pprev = &cell->proc_link.next;
+
afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
@@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock);
- list_del_init(&cell->proc_link);
+ hlist_del_rcu(&cell->proc_link);
afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
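
The afs_activate_cell() hunk open-codes a name-ordered insertion into an RCU-traversed hlist. A userspace replica with plain stores (no RCU) shows the pointer gymnastics; note that the patch publishes the node with rcu_assign_pointer(*p, &cell->proc_link.next), which is the same address as the node itself because next is the first member of struct hlist_node:

#include <stdio.h>
#include <string.h>

struct hnode {
	struct hnode *next;	/* first member, as in struct hlist_node */
	struct hnode **pprev;
	const char *name;
};

static void insert_sorted(struct hnode **first, struct hnode *cell)
{
	struct hnode **p;

	for (p = first; *p; p = &(*p)->next)
		if (strcmp(cell->name, (*p)->name) < 0)
			break;

	cell->pprev = p;
	cell->next = *p;
	*p = cell;		/* rcu_assign_pointer() in the patch */
	if (cell->next)
		cell->next->pprev = &cell->next;
}

int main(void)
{
	struct hnode a = { .name = "zebra" };
	struct hnode b = { .name = "alpha" };
	struct hnode *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);	/* lands before "zebra" */

	for (struct hnode *n = head; n; n = n->next)
		printf("%s\n", n->name);
	return 0;
}
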
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 1cde710a8013..f29c6dade7f6 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -265,7 +265,7 @@ int afs_dynroot_populate(struct super_block *sb)
return -ERESTARTSYS;
net->dynroot_sb = sb;
- list_for_each_entry(cell, &net->proc_cells, proc_link) {
+ hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
ret = afs_dynroot_mkdir(net, cell);
if (ret < 0)
goto error;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 8ae4e2ebb99a..72de1f157d20 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -244,7 +244,7 @@ struct afs_net {
seqlock_t cells_lock;
struct mutex proc_cells_lock;
- struct list_head proc_cells;
+ struct hlist_head proc_cells;
/* Known servers. Theoretically each fileserver can only be in one
* cell, but in practice, people create aliases and subsets and there's
@@ -322,7 +322,7 @@ struct afs_cell {
struct afs_net *net;
struct key *anonymous_key; /* anonymous user key for this cell */
struct work_struct manager; /* Manager for init/deinit/dns */
- struct list_head proc_link; /* /proc cell list link */
+ struct hlist_node proc_link; /* /proc cell list link */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie *cache; /* caching cookie */
#endif
diff --git a/fs/afs/main.c b/fs/afs/main.c
index e84fe822a960..107427688edd 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -87,7 +87,7 @@ static int __net_init afs_net_init(struct net *net_ns)
timer_setup(&net->cells_timer, afs_cells_timer, 0);
mutex_init(&net->proc_cells_lock);
- INIT_LIST_HEAD(&net->proc_cells);
+ INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
net->fs_servers = RB_ROOT;
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 476dcbb79713..9101f62707af 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -33,9 +33,8 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
static int afs_proc_cells_show(struct seq_file *m, void *v)
{
struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
- struct afs_net *net = afs_seq2net(m);
- if (v == &net->proc_cells) {
+ if (v == SEQ_START_TOKEN) {
/* display header on line 1 */
seq_puts(m, "USE NAME\n");
return 0;
@@ -50,12 +49,12 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
__acquires(rcu)
{
rcu_read_lock();
- return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos);
+ return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos);
}
static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
{
- return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos);
+ return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos);
}
static void afs_proc_cells_stop(struct seq_file *m, void *v)
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 1d329e6981d5..2f306c0cc4ee 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -199,9 +199,11 @@ static struct afs_server *afs_install_server(struct afs_net *net,
write_sequnlock(&net->fs_addr_lock);
ret = 0;
+ goto out;
exists:
afs_get_server(server);
+out:
write_sequnlock(&net->fs_lock);
return server;
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 03128ed1f34e..3c159a7f9a9e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -975,6 +975,10 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
{
struct gfs2_inode *ip = GFS2_I(inode);
+ if (!page_has_buffers(page)) {
+ create_empty_buffers(page, inode->i_sb->s_blocksize,
+ (1 << BH_Dirty)|(1 << BH_Uptodate));
+ }
gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 5289e22cb081..42ea7bab9144 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1220,35 +1220,92 @@ retry:
return 0;
}
+/* Unlock both inodes after they've been prepped for a range clone. */
+STATIC void
+xfs_reflink_remap_unlock(
+ struct file *file_in,
+ struct file *file_out)
+{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ bool same_inode = (inode_in == inode_out);
+
+ xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+ if (!same_inode)
+ xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ inode_unlock(inode_out);
+ if (!same_inode)
+ inode_unlock_shared(inode_in);
+}
+
/*
- * Link a range of blocks from one file to another.
+ * If we're reflinking to a point past the destination file's EOF, we must
+ * zero any speculative post-EOF preallocations that sit between the old EOF
+ * and the destination file offset.
*/
-int
-xfs_reflink_remap_range(
+static int
+xfs_reflink_zero_posteof(
+ struct xfs_inode *ip,
+ loff_t pos)
+{
+ loff_t isize = i_size_read(VFS_I(ip));
+
+ if (pos <= isize)
+ return 0;
+
+ trace_xfs_zero_eof(ip, isize, pos - isize);
+ return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
+ &xfs_iomap_ops);
+}
+
+/*
+ * Prepare two files for range cloning. Upon a successful return both inodes
+ * will have the iolock and mmaplock held, the page cache of the out file will
+ * be truncated, and any leases on the out file will have been broken. This
+ * function borrows heavily from xfs_file_aio_write_checks.
+ *
+ * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
+ * checked that the bytes beyond EOF physically match. Hence we cannot use the
+ * EOF block in the source dedupe range because it's not a complete block match
+ * and can thus introduce corruption into the file whose block is replaced.
+ *
+ * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
+ * "block aligned" for the purposes of cloning entire files. However, if the
+ * source file range includes the EOF block and it lands within the existing EOF
+ * of the destination file, then we can expose stale data from beyond the source
+ * file EOF in the destination file.
+ *
+ * XFS doesn't support partial block sharing, so in both cases we have to
+ * check for these conditions ourselves. For dedupe, we can simply round the
+ * length to dedupe down to the previous whole block and ignore the partial
+ * EOF block. While this means we can't dedupe the last block of a file, this
+ * is an acceptable tradeoff for simplicity of implementation.
+ *
+ * For cloning, we want to share the partial EOF block if it is also the new EOF
+ * block of the destination file. If the partial EOF block lies inside the
+ * existing destination EOF, then we have to abort the clone to avoid exposing
+ * stale data in the destination file. Hence we reject these clone attempts with
+ * -EINVAL in this case.
+ */
+STATIC int
+xfs_reflink_remap_prep(
struct file *file_in,
loff_t pos_in,
struct file *file_out,
loff_t pos_out,
- u64 len,
+ u64 *len,
bool is_dedupe)
{
struct inode *inode_in = file_inode(file_in);
struct xfs_inode *src = XFS_I(inode_in);
struct inode *inode_out = file_inode(file_out);
struct xfs_inode *dest = XFS_I(inode_out);
- struct xfs_mount *mp = src->i_mount;
bool same_inode = (inode_in == inode_out);
- xfs_fileoff_t sfsbno, dfsbno;
- xfs_filblks_t fsblen;
- xfs_extlen_t cowextsize;
+ u64 blkmask = i_blocksize(inode_in) - 1;
ssize_t ret;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return -EOPNOTSUPP;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
/* Lock both files against IO */
ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
if (ret)
@@ -1270,33 +1327,115 @@ xfs_reflink_remap_range(
goto out_unlock;
ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
- &len, is_dedupe);
+ len, is_dedupe);
if (ret <= 0)
goto out_unlock;
+ /*
+ * If the dedupe data matches, chop off the partial EOF block
+ * from the source file so we don't try to dedupe the partial
+ * EOF block.
+ */
+ if (is_dedupe) {
+ *len &= ~blkmask;
+ } else if (*len & blkmask) {
+ /*
+ * The user is attempting to share a partial EOF block,
+ * if it's inside the destination EOF then reject it.
+ */
+ if (pos_out + *len < i_size_read(inode_out)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
/* Attach dquots to dest inode before changing block map */
ret = xfs_qm_dqattach(dest);
if (ret)
goto out_unlock;
- trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
-
/*
- * Clear out post-eof preallocations because we don't have page cache
- * backing the delayed allocations and they'll never get freed on
- * their own.
+ * Zero existing post-eof speculative preallocations in the destination
+ * file.
*/
- if (xfs_can_free_eofblocks(dest, true)) {
- ret = xfs_free_eofblocks(dest);
- if (ret)
- goto out_unlock;
- }
+ ret = xfs_reflink_zero_posteof(dest, pos_out);
+ if (ret)
+ goto out_unlock;
/* Set flags and remap blocks. */
ret = xfs_reflink_set_inode_flag(src, dest);
if (ret)
goto out_unlock;
+ /* Zap any page cache for the destination file's range. */
+ truncate_inode_pages_range(&inode_out->i_data, pos_out,
+ PAGE_ALIGN(pos_out + *len) - 1);
+
+ /* If we're altering the file contents... */
+ if (!is_dedupe) {
+ /*
+ * ...update the timestamps (which will grab the ilock again
+ * from xfs_fs_dirty_inode, so we have to call it before we
+ * take the ilock).
+ */
+ if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+ ret = file_update_time(file_out);
+ if (ret)
+ goto out_unlock;
+ }
+
+ /*
+ * ...clear the security bits if the process is not being run
+ * by root. This keeps people from modifying setuid and setgid
+ * binaries.
+ */
+ ret = file_remove_privs(file_out);
+ if (ret)
+ goto out_unlock;
+ }
+
+ return 1;
+out_unlock:
+ xfs_reflink_remap_unlock(file_in, file_out);
+ return ret;
+}
+
+/*
+ * Link a range of blocks from one file to another.
+ */
+int
+xfs_reflink_remap_range(
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ u64 len,
+ bool is_dedupe)
+{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ struct xfs_mount *mp = src->i_mount;
+ xfs_fileoff_t sfsbno, dfsbno;
+ xfs_filblks_t fsblen;
+ xfs_extlen_t cowextsize;
+ ssize_t ret;
+
+ if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ /* Prepare and then clone file data. */
+ ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
+ &len, is_dedupe);
+ if (ret <= 0)
+ return ret;
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
dfsbno = XFS_B_TO_FSBT(mp, pos_out);
sfsbno = XFS_B_TO_FSBT(mp, pos_in);
fsblen = XFS_B_TO_FSB(mp, len);
@@ -1305,10 +1444,6 @@ xfs_reflink_remap_range(
if (ret)
goto out_unlock;
- /* Zap any page cache for the destination file's range. */
- truncate_inode_pages_range(&inode_out->i_data, pos_out,
- PAGE_ALIGN(pos_out + len) - 1);
-
/*
* Carry the cowextsize hint from src to dest if we're sharing the
* entire source file to the entire destination file, the source file
@@ -1325,12 +1460,7 @@ xfs_reflink_remap_range(
is_dedupe);
out_unlock:
- xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
- if (!same_inode)
- xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
- inode_unlock(inode_out);
- if (!same_inode)
- inode_unlock_shared(inode_in);
+ xfs_reflink_remap_unlock(file_in, file_out);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
return ret;
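
Concretely, with a 4096-byte block size the new partial-EOF-block policy behaves as follows (a worked example, not part of the patch): dedupe rounds the length down to a whole block, while clone rejects a partial tail that would land inside the destination's existing EOF.

#include <stdio.h>

int main(void)
{
	unsigned long long blkmask = 4096 - 1;
	unsigned long long len = 10000, pos_out = 0, dst_isize = 1048576;

	unsigned long long dedupe_len = len & ~blkmask;	/* 8192 */
	int clone_ok = !(len & blkmask) ||
		       pos_out + len >= dst_isize;	/* rejected here */

	printf("dedupe len %llu, clone %s\n",
	       dedupe_len, clone_ok ? "allowed" : "-EINVAL");
	return 0;
}
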
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7b75ff6e2fce..d7701d466b60 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -68,7 +68,7 @@
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
@@ -613,8 +613,8 @@
#define EXIT_DATA \
*(.exit.data .exit.data.*) \
- *(.fini_array) \
- *(.dtors) \
+ *(.fini_array .fini_array.*) \
+ *(.dtors .dtors.*) \
MEM_DISCARD(exit.data*) \
MEM_DISCARD(exit.rodata*)
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ff20b677fb9f..22254c1fe1c5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -412,6 +412,7 @@ struct cgroup {
* specific task are charged to the dom_cgrp.
*/
struct cgroup *dom_cgrp;
+ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
/* per-cpu recursive resource statistics */
struct cgroup_rstat_cpu __percpu *rstat_cpu;
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8942e61f0028..8ab5df769923 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_OPERATING,
};
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
- * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
- * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 0ea328e71ec9..a4d5eb37744a 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -95,6 +95,13 @@ struct gpio_irq_chip {
unsigned int num_parents;
/**
+ * @parent_irq:
+ *
+ * For use by gpiochip_set_cascaded_irqchip()
+ */
+ unsigned int parent_irq;
+
+ /**
* @parents:
*
* A list of interrupt parents of a GPIO chip. This is owned by the
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c4809ad8ab46..0ef67f837ae1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1670,6 +1670,7 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 26a92462f4ce..4b75796cac23 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1228,21 +1228,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
-#ifndef CONFIG_MLX5_CORE_IPOIB
-static inline
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-#else
+#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
const char *name,
void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params);
struct mlx5_profile {
u64 mask;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3f4c0b167333..d4b0c79d2924 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -668,10 +668,6 @@ typedef struct pglist_data {
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
#endif
-#ifdef CONFIG_NUMA_BALANCING
- /* Lock serializing the migrate rate limiting window */
- spinlock_t numabalancing_migrate_lock;
-#endif
/*
* This is a per-node reserve of pages that are not available
* to userspace allocations.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 76603ee136a8..dc1d9ed33b31 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2496,6 +2496,13 @@ struct netdev_notifier_info {
struct netlink_ext_ack *extack;
};
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
+};
+
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
@@ -3638,6 +3645,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
return 0;
}
+bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
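
Because the base netdev_notifier_info stays the first member of the new extended struct, a callback for an event known to carry the extended payload can map back from the base pointer. A minimal userspace sketch of that pattern follows; the struct contents are reduced, and kernel consumers may equivalently just cast, since the member is first:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct netdev_notifier_info { void *dev; };
struct netdev_notifier_info_ext {
	struct netdev_notifier_info info;	/* must be first */
	union { unsigned int mtu; } ext;
};

static void on_changemtu(struct netdev_notifier_info *info)
{
	struct netdev_notifier_info_ext *ext =
		container_of(info, struct netdev_notifier_info_ext, info);

	printf("new mtu: %u\n", ext->ext.mtu);
}

int main(void)
{
	struct netdev_notifier_info_ext ev = { .ext.mtu = 9000 };

	on_changemtu(&ev.info);
	return 0;
}
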
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 5d65521260b3..06996ad4f2bc 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,8 @@ struct qcom_scm_vmperm {
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_WLAN 0x18
+#define QCOM_SCM_VMID_WLAN_CE 0x19
#define QCOM_SCM_PERM_READ 0x4
#define QCOM_SCM_PERM_WRITE 0x2
#define QCOM_SCM_PERM_EXEC 0x1
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index c0e795d95477..1c89611e0e06 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -36,6 +36,7 @@ enum {
SCIx_SH4_SCIF_FIFODATA_REGTYPE,
SCIx_SH7705_SCIF_REGTYPE,
SCIx_HSCIF_REGTYPE,
+ SCIx_RZ_SCIFA_REGTYPE,
SCIx_NR_REGTYPES,
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a28ac9284f0..3f529ad9a9d2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void)
return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
+extern bool pm_suspend_via_s2idle(void);
extern void __init pm_states_init(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);
@@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
static inline bool pm_suspend_via_firmware(void) { return false; }
static inline bool pm_resume_via_firmware(void) { return false; }
+static inline bool pm_suspend_via_s2idle(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 848f5b25e178..8ed77bb4ed86 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -249,6 +249,7 @@ struct tcp_sock {
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
/* RTT measurement */
u64 tcp_mstamp; /* most recent packet received/sent */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1ec67536bbab..1fa41b7a1be3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -775,6 +775,12 @@ struct cfg80211_crypto_settings {
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @ftm_responder: enable FTM responder functionality; -1 for no change
+ * (which also implies no change in LCI/civic location data)
+ * @lci: LCI subelement content
+ * @civicloc: Civic location subelement content
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic location data length
*/
struct cfg80211_beacon_data {
const u8 *head, *tail;
@@ -782,12 +788,17 @@ struct cfg80211_beacon_data {
const u8 *proberesp_ies;
const u8 *assocresp_ies;
const u8 *probe_resp;
+ const u8 *lci;
+ const u8 *civicloc;
+ s8 ftm_responder;
size_t head_len, tail_len;
size_t beacon_ies_len;
size_t proberesp_ies_len;
size_t assocresp_ies_len;
size_t probe_resp_len;
+ size_t lci_len;
+ size_t civicloc_len;
};
struct mac_address {
@@ -1292,6 +1303,10 @@ struct cfg80211_tid_stats {
* @ack_signal: signal strength (in dBm) of the last ACK frame.
* @avg_ack_signal: average rssi value of ack packet for the no of msdu's has
* been sent.
+ * @rx_mpdu_count: number of MPDUs received from this station
+ * @fcs_err_count: number of packets (MPDUs) received from this station with
+ * an FCS error. This counter should be incremented only when the TA of
+ * the received packet with an FCS error matches the peer MAC address.
*/
struct station_info {
u64 filled;
@@ -1338,6 +1353,9 @@ struct station_info {
struct cfg80211_tid_stats *pertid;
s8 ack_signal;
s8 avg_ack_signal;
+
+ u32 rx_mpdu_count;
+ u32 fcs_err_count;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -2797,6 +2815,40 @@ struct cfg80211_external_auth_params {
};
/**
+ * cfg80211_ftm_responder_stats - FTM responder statistics
+ *
+ * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to
+ * indicate the relevant values in this struct for them
+ * @success_num: number of FTM sessions in which all frames were successfully
+ * answered
+ * @partial_num: number of FTM sessions in which part of frames were
+ * successfully answered
+ * @failed_num: number of failed FTM sessions
+ * @asap_num: number of ASAP FTM sessions
+ * @non_asap_num: number of non-ASAP FTM sessions
+ * @total_duration_ms: total session durations - gives an indication
+ * of how much time the responder was busy
+ * @unknown_triggers_num: number of unknown FTM triggers - triggers from
+ * initiators that didn't finish successfully the negotiation phase with
+ * the responder
+ * @reschedule_requests_num: number of FTM reschedule requests - initiator asks
+ * for a new schedule although it already has a scheduled FTM slot
+ * @out_of_window_triggers_num: total FTM triggers out of scheduled window
+ */
+struct cfg80211_ftm_responder_stats {
+ u32 filled;
+ u32 success_num;
+ u32 partial_num;
+ u32 failed_num;
+ u32 asap_num;
+ u32 non_asap_num;
+ u64 total_duration_ms;
+ u32 unknown_triggers_num;
+ u32 reschedule_requests_num;
+ u32 out_of_window_triggers_num;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -3128,6 +3180,9 @@ struct cfg80211_external_auth_params {
*
* @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter
* tells the driver that the frame should not be encrypted.
+ *
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative, currently no way to reset is provided.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3433,6 +3488,10 @@ struct cfg80211_ops {
const u8 *buf, size_t len,
const u8 *dest, const __be16 proto,
const bool noencrypt);
+
+ int (*get_ftm_responder_stats)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
};
/*
@@ -3960,7 +4019,6 @@ struct wiphy_iftype_ext_capab {
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
*
- * @cookie_counter: unique generic cookie counter, used to identify objects.
* @nan_supported_bands: bands supported by the device in NAN mode, a
* bitmap of &enum nl80211_band values. For instance, for
* NL80211_BAND_2GHZ, bit 0 would be set
@@ -4099,8 +4157,6 @@ struct wiphy {
u32 bss_select_support;
- u64 cookie_counter;
-
u8 nan_supported_bands;
u32 txq_limit;
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 9a70755ad1c2..45db0c79462d 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -298,7 +298,7 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
-#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
DEVLINK_PARAM_TYPE_U8,
DEVLINK_PARAM_TYPE_U16,
@@ -311,7 +311,7 @@ union devlink_param_value {
u8 vu8;
u16 vu16;
u32 vu32;
- const char *vstr;
+ char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
bool vbool;
};
@@ -568,6 +568,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src);
struct devlink_region *devlink_region_create(struct devlink *devlink,
const char *region_name,
u32 region_max_snapshots,
@@ -804,6 +806,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id)
{
}
+static inline void
+devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src)
+{
+}
+
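A driver would use the new helper to populate a string value before handing it to devlink, so the fixed-size vstr buffer cannot overflow; a minimal sketch, with MYDRV_PARAM_ID_FW_BUNDLE as a hypothetical parameter id:

static int mydrv_params_register(struct devlink *devlink)
{
	union devlink_param_value value;

	/* safe, truncating copy into the fixed-size vstr buffer */
	devlink_param_value_str_fill(&value, "default-bundle");
	return devlink_param_driverinit_value_set(devlink,
						  MYDRV_PARAM_ID_FW_BUNDLE,
						  value);
}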
static inline struct devlink_region *
devlink_region_create(struct devlink *devlink,
const char *region_name,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index f06e968f1992..caabfd84a098 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -407,6 +407,9 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg),
void *arg);
+void fib6_clean_all_skip_notify(struct net *net,
+ int (*func)(struct fib6_info *, void *arg),
+ void *arg);
int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9846b79c9ee1..852e4ebf2209 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -395,6 +395,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index ff33f498c137..829650540780 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1089,8 +1089,6 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif
#ifdef CONFIG_SYSCTL
-extern struct ctl_table ipv6_route_table_template[];
-
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index c4fadbafbf21..71985e95d2d9 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -309,6 +309,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
* @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
+ * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder
+ * functionality changed for this BSS (AP mode).
*
*/
enum ieee80211_bss_change {
@@ -338,6 +340,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_MU_GROUPS = 1<<23,
BSS_CHANGED_KEEP_ALIVE = 1<<24,
BSS_CHANGED_MCAST_RATE = 1<<25,
+ BSS_CHANGED_FTM_RESPONDER = 1<<26,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -464,6 +467,21 @@ struct ieee80211_mu_group_data {
};
/**
+ * struct ieee80211_ftm_responder_params - FTM responder parameters
+ *
+ * @lci: LCI subelement content
+ * @civicloc: CIVIC location subelement content
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic data length
+ */
+struct ieee80211_ftm_responder_params {
+ const u8 *lci;
+ const u8 *civicloc;
+ size_t lci_len;
+ size_t civicloc_len;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -562,6 +580,9 @@ struct ieee80211_mu_group_data {
* @protected_keep_alive: if set, indicates that the station should send an RSN
* protected frame to the AP to reset the idle timer at the AP for the
* station.
+ * @ftm_responder: whether to enable or disable fine timing measurement (FTM)
+ * responder functionality.
+ * @ftmr_params: configurable LCI/civic parameters when enabling the FTM
+ * responder.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -612,6 +633,8 @@ struct ieee80211_bss_conf {
bool allow_p2p_go_ps;
u16 max_idle_period;
bool protected_keep_alive;
+ bool ftm_responder;
+ struct ieee80211_ftm_responder_params *ftmr_params;
};
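On the driver side, these new fields are consumed from the bss_info_changed() callback when BSS_CHANGED_FTM_RESPONDER is signalled; a minimal sketch, with mydrv_cfg_ftm() as an assumed firmware hook:

static void mydrv_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *info,
				   u32 changed)
{
	if (!(changed & BSS_CHANGED_FTM_RESPONDER))
		return;

	/* ftmr_params may be NULL when no LCI/civic data was configured */
	mydrv_cfg_ftm(hw, info->ftm_responder, info->ftmr_params);
}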
/**
@@ -3598,6 +3621,8 @@ enum ieee80211_reconfig_type {
* aggregating two specific frames in the same A-MSDU. The relation
* between the skbs should be symmetric and transitive. Note that while
* skb is always a real frame, head may or may not be an A-MSDU.
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative; currently no way to reset them is provided.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3883,6 +3908,9 @@ struct ieee80211_ops {
bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw,
struct sk_buff *head,
struct sk_buff *skb);
+ int (*get_ftm_responder_stats)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
};
/**
@@ -4352,6 +4380,21 @@ void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
u32 thr);
/**
+ * ieee80211_tx_rate_update - transmit rate update callback
+ *
+ * Drivers should call this function with a non-NULL pubsta.
+ * This function can be used by drivers that do not have a provision
+ * for updating the tx rate in the data path.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @pubsta: the station to update the tx rate for.
+ * @info: tx status information
+ */
+void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_tx_info *info);
+
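A minimal sketch of how a driver without per-frame tx status might feed a firmware rate report into this callback; which fields of the tx info the rate-control hook consumes is an assumption here:

static void mydrv_report_rate(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta, u8 rate_idx)
{
	struct ieee80211_tx_info info = {};

	/* describe the rate the firmware says it is now using */
	info.status.rates[0].idx = rate_idx;
	info.status.rates[0].count = 1;

	ieee80211_tx_rate_update(hw, sta, &info);
}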
+/**
* ieee80211_tx_status - transmit status callback
*
* Call this function for all transmitted frames after they have been
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0874f7fcd859..f58b384aa6c9 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -323,6 +323,7 @@ void __neigh_set_probe_once(struct neighbour *neigh);
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f1db8e594847..4c1e99303b5a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -311,7 +311,7 @@ struct nla_policy {
#define NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
{ .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr }
-#define __NLA_ENSURE(condition) (sizeof(char[1 - 2*!(condition)]) - 1)
+#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_INT_TYPE(tp) \
(__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \
tp == NLA_S16 || tp == NLA_U16 || \
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index f0e396ab9bec..ef1ed529f33c 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -45,6 +45,7 @@ struct netns_sysctl_ipv6 {
int max_dst_opts_len;
int max_hbh_opts_len;
int seg6_flowlabel;
+ bool skip_notify_on_dev_down;
};
struct netns_ipv6 {
diff --git a/include/net/sock.h b/include/net/sock.h
index 7470c45d182d..2440f8b407eb 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -422,8 +422,8 @@ struct sock {
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
- u32 sk_pacing_rate; /* bytes per second */
- u32 sk_max_pacing_rate;
+ unsigned long sk_pacing_rate; /* bytes per second */
+ unsigned long sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e950c2a68f06..0ed5d913a492 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2223,6 +2223,16 @@ struct rdma_netdev {
union ib_gid *gid, u16 mlid);
};
+struct rdma_netdev_alloc_params {
+ size_t sizeof_priv;
+ unsigned int txqs;
+ unsigned int rxqs;
+ void *param;
+
+ int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
+ struct net_device *netdev, void *param);
+};
+
struct ib_port_pkey_list {
/* Lock to hold while modifying the list. */
spinlock_t list_lock;
@@ -2523,8 +2533,8 @@ struct ib_device {
/**
* rdma netdev operation
*
- * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
- * doesn't support the specified rdma netdev type.
+ * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
+ * must return -EOPNOTSUPP if it doesn't support the specified type.
*/
struct net_device *(*alloc_rdma_netdev)(
struct ib_device *device,
@@ -2534,6 +2544,10 @@ struct ib_device {
unsigned char name_assign_type,
void (*setup)(struct net_device *));
+ int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params);
+
struct module *owner;
struct device dev;
struct kobject *ports_parent;
@@ -4179,4 +4193,16 @@ struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile);
int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs);
+
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *));
+
+int rdma_init_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *),
+ struct net_device *netdev);
+
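With the new entry point, a ULP allocates the net_device itself and then runs the device-specific initialization on it; a minimal sketch, error unwinding trimmed:

static int myulp_init_netdev(struct ib_device *ibdev, u8 port,
			     struct net_device *ndev,
			     void (*setup)(struct net_device *))
{
	int rc;

	rc = rdma_init_netdev(ibdev, port, RDMA_NETDEV_IPOIB, "ib%d",
			      NET_NAME_UNKNOWN, setup, ndev);
	if (rc)
		return rc;	/* -EOPNOTSUPP if the type is unsupported */

	return register_netdev(ndev);
}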
#endif /* IB_VERBS_H */
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
index eaaf56df4086..5b99cb2ea5ef 100644
--- a/include/soc/fsl/bman.h
+++ b/include/soc/fsl/bman.h
@@ -126,4 +126,12 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
*/
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+/**
+ * bman_is_probed - Check if bman is probed
+ *
+ * Returns 1 if the bman driver successfully probed, -1 if the bman driver
+ * failed to probe, or 0 if the bman driver has not probed yet.
+ */
+int bman_is_probed(void);
+
#endif /* __FSL_BMAN_H */
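Dependent drivers can use the tri-state return of bman_is_probed() (and its qman twin below) to defer their own probe until the portal driver is ready; a minimal sketch with a hypothetical consumer:

static int mydrv_probe(struct platform_device *pdev)
{
	int ret = bman_is_probed();

	if (!ret)
		return -EPROBE_DEFER;	/* bman has not probed yet */
	if (ret < 0)
		return -ENODEV;		/* bman probe failed */

	/* bman portals are usable from this point on */
	return 0;
}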
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index d4dfefdee6c1..597783b8a3a0 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1186,4 +1186,12 @@ int qman_alloc_cgrid_range(u32 *result, u32 count);
*/
int qman_release_cgrid(u32 id);
+/**
+ * qman_is_probed - Check if qman is probed
+ *
+ * Returns 1 if the qman driver successfully probed, -1 if the qman driver
+ * failed to probe, or 0 if the qman driver has not probed yet.
+ */
+int qman_is_probed(void);
+
#endif /* __FSL_QMAN_H */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 837393fa897b..573d5b901fb1 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -931,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_fast_assign(
__entry->call = call_id;
memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+ __entry->where = where;
),
TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h
index 75eed8b62823..7239aa9c0766 100644
--- a/include/uapi/linux/if_fddi.h
+++ b/include/uapi/linux/if_fddi.h
@@ -6,9 +6,10 @@
*
* Global definitions for the ANSI FDDI interface.
*
- * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004
+ * Version: @(#)if_fddi.h 1.0.3 Oct 6 2018
*
- * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ * Author: Lawrence V. Stefani, <stefani@yahoo.com>
+ * Maintainer: Maciej W. Rozycki, <macro@linux-mips.org>
*
* if_fddi.h is based on previous if_ether.h and if_tr.h work by
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -45,7 +46,21 @@
#define FDDI_K_OUI_LEN 3 /* Octets in OUI in 802.2 SNAP
header */
-/* Define FDDI Frame Control (FC) Byte values */
+/* Define FDDI Frame Control (FC) Byte masks */
+#define FDDI_FC_K_CLASS_MASK 0x80 /* class bit */
+#define FDDI_FC_K_CLASS_SYNC 0x80
+#define FDDI_FC_K_CLASS_ASYNC 0x00
+#define FDDI_FC_K_ALEN_MASK 0x40 /* address length bit */
+#define FDDI_FC_K_ALEN_48 0x40
+#define FDDI_FC_K_ALEN_16 0x00
+#define FDDI_FC_K_FORMAT_MASK 0x30 /* format bits */
+#define FDDI_FC_K_FORMAT_FUTURE 0x30
+#define FDDI_FC_K_FORMAT_IMPLEMENTOR 0x20
+#define FDDI_FC_K_FORMAT_LLC 0x10
+#define FDDI_FC_K_FORMAT_MANAGEMENT 0x00
+#define FDDI_FC_K_CONTROL_MASK 0x0f /* control bits */
+
+/* Define FDDI Frame Control (FC) Byte specific values */
#define FDDI_FC_K_VOID 0x00
#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0
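The new masks let code classify a frame directly from its FC byte; an illustrative sketch with hypothetical helper names:

static inline bool fddi_fc_is_llc(u8 fc)
{
	/* LLC frames carry the format bits pattern 01 */
	return (fc & FDDI_FC_K_FORMAT_MASK) == FDDI_FC_K_FORMAT_LLC;
}

static inline bool fddi_fc_is_sync(u8 fc)
{
	/* class bit set means synchronous service */
	return (fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC;
}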
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 58faab897201..1debfa42cba1 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -287,6 +287,7 @@ enum {
IFLA_BR_MCAST_STATS_ENABLED,
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
+ IFLA_BR_VLAN_STATS_PER_PORT,
__IFLA_BR_MAX,
};
diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h
index 4c292ecbb748..0a26a5576645 100644
--- a/include/uapi/linux/ncsi.h
+++ b/include/uapi/linux/ncsi.h
@@ -23,6 +23,9 @@
* optionally the preferred NCSI_ATTR_CHANNEL_ID.
* @NCSI_CMD_CLEAR_INTERFACE: clear any preferred package/channel combination.
* Requires NCSI_ATTR_IFINDEX.
+ * @NCSI_CMD_SEND_CMD: send NC-SI command to network card.
+ * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID
+ * and NCSI_ATTR_CHANNEL_ID.
* @NCSI_CMD_MAX: highest command number
*/
enum ncsi_nl_commands {
@@ -30,6 +33,7 @@ enum ncsi_nl_commands {
NCSI_CMD_PKG_INFO,
NCSI_CMD_SET_INTERFACE,
NCSI_CMD_CLEAR_INTERFACE,
+ NCSI_CMD_SEND_CMD,
__NCSI_CMD_AFTER_LAST,
NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1
@@ -43,6 +47,7 @@ enum ncsi_nl_commands {
* @NCSI_ATTR_PACKAGE_LIST: nested array of NCSI_PKG_ATTR attributes
* @NCSI_ATTR_PACKAGE_ID: package ID
* @NCSI_ATTR_CHANNEL_ID: channel ID
+ * @NCSI_ATTR_DATA: command payload
* @NCSI_ATTR_MAX: highest attribute number
*/
enum ncsi_nl_attrs {
@@ -51,6 +56,7 @@ enum ncsi_nl_attrs {
NCSI_ATTR_PACKAGE_LIST,
NCSI_ATTR_PACKAGE_ID,
NCSI_ATTR_CHANNEL_ID,
+ NCSI_ATTR_DATA,
__NCSI_ATTR_AFTER_LAST,
NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index cfc94178d608..6d610bae30a9 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1033,6 +1033,9 @@
* %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its
* address(specified in %NL80211_ATTR_MAC).
*
+ * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in
+ * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1245,6 +1248,8 @@ enum nl80211_commands {
NL80211_CMD_CONTROL_PORT_FRAME,
+ NL80211_CMD_GET_FTM_RESPONDER_STATS,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -2241,6 +2246,14 @@ enum nl80211_commands {
* association request when used with NL80211_CMD_NEW_STATION). Can be set
* only if %NL80211_STA_FLAG_WME is set.
*
+ * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include
+ * in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing
+ * measurement (FTM) responder functionality, containing the relevant
+ * parameters; see &enum nl80211_ftm_responder_attributes
+ *
+ * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
+ * statistics, see &enum nl80211_ftm_responder_stats.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2682,6 +2695,10 @@ enum nl80211_attrs {
NL80211_ATTR_HE_CAPABILITY,
+ NL80211_ATTR_FTM_RESPONDER,
+
+ NL80211_ATTR_FTM_RESPONDER_STATS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3051,6 +3068,12 @@ enum nl80211_sta_bss_param {
* @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
* @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm)
* @NL80211_STA_INFO_ACK_SIGNAL_AVG: avg signal strength of ACK frames (s8, dBm)
+ * @NL80211_STA_INFO_RX_MPDUS: total number of received packets (MPDUs)
+ * (u32, from this station)
+ * @NL80211_STA_INFO_FCS_ERROR_COUNT: total number of packets (MPDUs) received
+ * with an FCS error (u32, from this station). This count may not include
+ * some packets with an FCS error due to TA corruption. Hence this counter
+ * might not be fully accurate.
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3091,6 +3114,8 @@ enum nl80211_sta_info {
NL80211_STA_INFO_PAD,
NL80211_STA_INFO_ACK_SIGNAL,
NL80211_STA_INFO_ACK_SIGNAL_AVG,
+ NL80211_STA_INFO_RX_MPDUS,
+ NL80211_STA_INFO_FCS_ERROR_COUNT,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -5225,6 +5250,8 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
* except for supported rates from the probe request content if requested
* by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
+ * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine
+ * timing measurement responder role.
*
* @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are
* able to rekey an in-use key correctly. Userspace must not rekey PTK keys
@@ -5269,6 +5296,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
NL80211_EXT_FEATURE_CAN_REPLACE_PTK0,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5808,4 +5836,74 @@ enum nl80211_external_auth_action {
NL80211_EXTERNAL_AUTH_ABORT,
};
+/**
+ * enum nl80211_ftm_responder_attributes - fine timing measurement
+ * responder attributes
+ * @__NL80211_FTM_RESP_ATTR_INVALID: Invalid
+ * @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled
+ * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element
+ * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10)
+ * @NL80211_FTM_RESP_ATTR_CIVICLOC: The content of Measurement Report Element
+ * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13)
+ * @__NL80211_FTM_RESP_ATTR_LAST: Internal
+ * @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute.
+ */
+enum nl80211_ftm_responder_attributes {
+ __NL80211_FTM_RESP_ATTR_INVALID,
+
+ NL80211_FTM_RESP_ATTR_ENABLED,
+ NL80211_FTM_RESP_ATTR_LCI,
+ NL80211_FTM_RESP_ATTR_CIVICLOC,
+
+ /* keep last */
+ __NL80211_FTM_RESP_ATTR_LAST,
+ NL80211_FTM_RESP_ATTR_MAX = __NL80211_FTM_RESP_ATTR_LAST - 1,
+};
+
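From userspace, the nested attribute would be assembled roughly as follows (a libnl-3 sketch; attaching it to an NL80211_CMD_START_AP message is assumed context):

static int put_ftm_responder(struct nl_msg *msg, const void *lci, int lci_len)
{
	struct nlattr *nest;

	nest = nla_nest_start(msg, NL80211_ATTR_FTM_RESPONDER);
	if (!nest)
		return -ENOBUFS;

	/* ENABLED is a flag attribute; LCI carries raw subelement data */
	nla_put_flag(msg, NL80211_FTM_RESP_ATTR_ENABLED);
	if (lci)
		nla_put(msg, NL80211_FTM_RESP_ATTR_LCI, lci_len, lci);

	nla_nest_end(msg, nest);
	return 0;
}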
+/**
+ * enum nl80211_ftm_responder_stats - FTM responder statistics
+ *
+ * These attribute types are used with %NL80211_ATTR_FTM_RESPONDER_STATS
+ * when getting FTM responder statistics.
+ *
+ * @__NL80211_FTM_STATS_INVALID: attribute number 0 is reserved
+ * @NL80211_FTM_STATS_SUCCESS_NUM: number of FTM sessions in which all frames
+ * were successfully answered (u32)
+ * @NL80211_FTM_STATS_PARTIAL_NUM: number of FTM sessions in which part of the
+ * frames were successfully answered (u32)
+ * @NL80211_FTM_STATS_FAILED_NUM: number of failed FTM sessions (u32)
+ * @NL80211_FTM_STATS_ASAP_NUM: number of ASAP sessions (u32)
+ * @NL80211_FTM_STATS_NON_ASAP_NUM: number of non-ASAP sessions (u32)
+ * @NL80211_FTM_STATS_TOTAL_DURATION_MSEC: total duration of all sessions -
+ * gives an indication of how much time the responder was busy (u64, msec)
+ * @NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM: number of unknown FTM triggers -
+ * triggers from initiators that didn't successfully finish the negotiation
+ * phase with the responder (u32)
+ * @NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM: number of FTM reschedule requests
+ * - initiator asks for a new schedule although it already has a scheduled
+ * FTM slot (u32)
+ * @NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM: number of FTM triggers out of
+ * scheduled window (u32)
+ * @NL80211_FTM_STATS_PAD: used for padding, ignore
+ * @__NL80211_FTM_STATS_AFTER_LAST: Internal
+ * @NL80211_FTM_STATS_MAX: highest possible FTM responder stats attribute
+ */
+enum nl80211_ftm_responder_stats {
+ __NL80211_FTM_STATS_INVALID,
+ NL80211_FTM_STATS_SUCCESS_NUM,
+ NL80211_FTM_STATS_PARTIAL_NUM,
+ NL80211_FTM_STATS_FAILED_NUM,
+ NL80211_FTM_STATS_ASAP_NUM,
+ NL80211_FTM_STATS_NON_ASAP_NUM,
+ NL80211_FTM_STATS_TOTAL_DURATION_MSEC,
+ NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM,
+ NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM,
+ NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM,
+ NL80211_FTM_STATS_PAD,
+
+ /* keep last */
+ __NL80211_FTM_STATS_AFTER_LAST,
+ NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index ac9e8c96d9bd..8cb3a6fef553 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -18,14 +18,17 @@ struct smc_diag_req {
* on the internal clcsock, and more SMC-related socket data
*/
struct smc_diag_msg {
- __u8 diag_family;
- __u8 diag_state;
- __u8 diag_mode;
- __u8 diag_shutdown;
+ __u8 diag_family;
+ __u8 diag_state;
+ union {
+ __u8 diag_mode;
+ __u8 diag_fallback; /* the old name of the field */
+ };
+ __u8 diag_shutdown;
struct inet_diag_sockid id;
- __u32 diag_uid;
- __u64 diag_inode;
+ __u32 diag_uid;
+ __aligned_u64 diag_inode;
};
/* Mode of a connection */
@@ -99,11 +102,11 @@ struct smc_diag_fallback {
};
struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
- __u32 linkid; /* Link identifier */
- __u64 peer_gid; /* Peer GID */
- __u64 my_gid; /* My GID */
- __u64 token; /* Token of DMB */
- __u64 peer_token; /* Token of remote DMBE */
+ __u32 linkid; /* Link identifier */
+ __aligned_u64 peer_gid; /* Peer GID */
+ __aligned_u64 my_gid; /* My GID */
+ __aligned_u64 token; /* Token of DMB */
+ __aligned_u64 peer_token; /* Token of remote DMBE */
};
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09d00f8c442b..09502de447f5 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -40,5 +40,6 @@ struct udphdr {
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
+#define UDP_ENCAP_RXRPC 6
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index aae10baf1902..4a3dae2a8283 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2836,11 +2836,12 @@ restart:
}
/**
- * cgroup_save_control - save control masks of a subtree
+ * cgroup_save_control - save control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
- * Save ->subtree_control and ->subtree_ss_mask to the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
*/
static void cgroup_save_control(struct cgroup *cgrp)
{
@@ -2850,6 +2851,7 @@ static void cgroup_save_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
dsct->old_subtree_control = dsct->subtree_control;
dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
+ dsct->old_dom_cgrp = dsct->dom_cgrp;
}
}
@@ -2875,11 +2877,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp)
}
/**
- * cgroup_restore_control - restore control masks of a subtree
+ * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
- * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
*/
static void cgroup_restore_control(struct cgroup *cgrp)
{
@@ -2889,6 +2892,7 @@ static void cgroup_restore_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
dsct->subtree_control = dsct->old_subtree_control;
dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
+ dsct->dom_cgrp = dsct->old_dom_cgrp;
}
}
@@ -3196,6 +3200,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup *dom_cgrp = parent->dom_cgrp;
+ struct cgroup *dsct;
+ struct cgroup_subsys_state *d_css;
int ret;
lockdep_assert_held(&cgroup_mutex);
@@ -3225,12 +3231,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
*/
cgroup_save_control(cgrp);
- cgrp->dom_cgrp = dom_cgrp;
+ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
+ if (dsct == cgrp || cgroup_is_threaded(dsct))
+ dsct->dom_cgrp = dom_cgrp;
+
ret = cgroup_apply_control(cgrp);
if (!ret)
parent->nr_threaded_children++;
- else
- cgrp->dom_cgrp = cgrp;
cgroup_finalize_control(cgrp, ret);
return ret;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 5342f6fc022e..0bd595a0b610 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -63,6 +63,12 @@ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
enum s2idle_states __read_mostly s2idle_state;
static DEFINE_RAW_SPINLOCK(s2idle_lock);
+bool pm_suspend_via_s2idle(void)
+{
+ return mem_sleep_current == PM_SUSPEND_TO_IDLE;
+}
+EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle);
+
void s2idle_set_ops(const struct platform_s2idle_ops *ops)
{
lock_system_sleep();
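The new predicate lets a driver pick a wakeup strategy that only works for suspend-to-idle; a minimal sketch with hypothetical driver names:

static int mydrv_suspend(struct device *dev)
{
	struct mydrv_priv *priv = dev_get_drvdata(dev);

	/* in s2idle the device IRQ itself must be able to wake the system */
	if (pm_suspend_via_s2idle())
		enable_irq_wake(priv->irq);

	return 0;
}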
diff --git a/lib/Makefile b/lib/Makefile
index ca3f7ebb900d..423876446810 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -119,7 +119,6 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
-CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500)
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
diff --git a/lib/bch.c b/lib/bch.c
index 7b0f2006698b..5db6d3a4c8a6 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -79,20 +79,19 @@
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
#define BCH_MAX_M (CONFIG_BCH_CONST_M)
+#define BCH_MAX_T (CONFIG_BCH_CONST_T)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
-#define BCH_MAX_M 15
+#define BCH_MAX_M 15 /* 2KB */
+#define BCH_MAX_T 64 /* 64 bit correction */
#endif
-#define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)
-
#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
-#define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8)
#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
@@ -202,6 +201,9 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
const uint32_t * const tab3 = tab2 + 256*(l+1);
const uint32_t *pdata, *p0, *p1, *p2, *p3;
+ if (WARN_ON(r_bytes > sizeof(r)))
+ return;
+
if (ecc) {
/* load ecc parity bytes into internal 32-bit buffer */
load_ecc8(bch, bch->ecc_buf, ecc);
@@ -1285,6 +1287,13 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
*/
goto fail;
+ if (t > BCH_MAX_T)
+ /*
+ * we can support larger than 64 bits if necessary, at the
+ * cost of higher stack usage.
+ */
+ goto fail;
+
/* sanity checks */
if ((t < 1) || (m*t >= ((1 << m)-1)))
/* invalid t value */
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d5b3a3f95c01..812e59e13fe6 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2794,7 +2794,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
copy = end - str;
memcpy(str, args, copy);
str += len;
- args += len;
+ args += len + 1;
}
}
if (process)
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 25a5d87e2e4c..912aae5fa09e 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,7 +15,6 @@
* but they are bigger and use more memory for the lookup table.
*/
-#include <linux/crc32poly.h>
#include "xz_private.h"
/*
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 482b90f363fe..09360ebb510e 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -102,6 +102,10 @@
# endif
#endif
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 706a738c0aee..e2ef1c17942f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6193,15 +6193,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
-#ifdef CONFIG_NUMA_BALANCING
-static void pgdat_init_numabalancing(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->numabalancing_migrate_lock);
-}
-#else
-static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
@@ -6226,7 +6217,6 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
pgdat_resize_init(pgdat);
- pgdat_init_numabalancing(pgdat);
pgdat_init_split_queue(pgdat);
pgdat_init_kcompactd(pgdat);
diff --git a/mm/percpu.c b/mm/percpu.c
index a749d4d96e3e..4b90682623e9 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1212,6 +1212,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
if (!chunk)
return;
+ pcpu_mem_free(chunk->md_blocks);
pcpu_mem_free(chunk->bound_map);
pcpu_mem_free(chunk->alloc_map);
pcpu_mem_free(chunk);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e5a5bc5d5232..3345f1984542 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1034,6 +1034,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
+ [IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -1114,6 +1115,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (err)
return err;
}
+
+ if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
+ __u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);
+
+ err = br_vlan_set_stats_per_port(br, per_port);
+ if (err)
+ return err;
+ }
#endif
if (data[IFLA_BR_GROUP_FWD_MASK]) {
@@ -1327,6 +1336,7 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */
nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */
nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */
@@ -1417,7 +1427,9 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
- br_opt_get(br, BROPT_VLAN_STATS_ENABLED)))
+ br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
+ nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
+ br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 57229b9d800f..10ee39fdca5c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -320,6 +320,7 @@ enum net_bridge_opts {
BROPT_HAS_IPV6_ADDR,
BROPT_NEIGH_SUPPRESS_ENABLED,
BROPT_MTU_SET_BY_USER,
+ BROPT_VLAN_STATS_PER_PORT,
};
struct net_bridge {
@@ -859,6 +860,7 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
int br_vlan_set_stats(struct net_bridge *br, unsigned long val);
+int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val);
int br_vlan_init(struct net_bridge *br);
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c93c5724609e..60182bef6341 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -803,6 +803,22 @@ static ssize_t vlan_stats_enabled_store(struct device *d,
return store_bridge_parm(d, buf, len, br_vlan_set_stats);
}
static DEVICE_ATTR_RW(vlan_stats_enabled);
+
+static ssize_t vlan_stats_per_port_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+
+ return sprintf(buf, "%u\n", br_opt_get(br, BROPT_VLAN_STATS_PER_PORT));
+}
+
+static ssize_t vlan_stats_per_port_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_vlan_set_stats_per_port);
+}
+static DEVICE_ATTR_RW(vlan_stats_per_port);
#endif
static struct attribute *bridge_attrs[] = {
@@ -856,6 +872,7 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_vlan_protocol.attr,
&dev_attr_default_pvid.attr,
&dev_attr_vlan_stats_enabled.attr,
+ &dev_attr_vlan_stats_per_port.attr,
#endif
NULL
};
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 5942e03dd845..8c9297a01947 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -190,6 +190,19 @@ static void br_vlan_put_master(struct net_bridge_vlan *masterv)
}
}
+static void nbp_vlan_rcu_free(struct rcu_head *rcu)
+{
+ struct net_bridge_vlan *v;
+
+ v = container_of(rcu, struct net_bridge_vlan, rcu);
+ WARN_ON(br_vlan_is_master(v));
+ /* if we had per-port stats configured then free them here */
+ if (v->brvlan->stats != v->stats)
+ free_percpu(v->stats);
+ v->stats = NULL;
+ kfree(v);
+}
+
/* This is the shared VLAN add function which works for both ports and bridge
* devices. There are four possible calls to this function in terms of the
* vlan entry type:
@@ -245,7 +258,15 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
if (!masterv)
goto out_filt;
v->brvlan = masterv;
- v->stats = masterv->stats;
+ if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
+ v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+ if (!v->stats) {
+ err = -ENOMEM;
+ goto out_filt;
+ }
+ } else {
+ v->stats = masterv->stats;
+ }
} else {
err = br_switchdev_port_vlan_add(dev, v->vid, flags);
if (err && err != -EOPNOTSUPP)
@@ -282,6 +303,10 @@ out_filt:
if (p) {
__vlan_vid_del(dev, br, v->vid);
if (masterv) {
+ if (v->stats && masterv->stats != v->stats)
+ free_percpu(v->stats);
+ v->stats = NULL;
+
br_vlan_put_master(masterv);
v->brvlan = NULL;
}
@@ -329,7 +354,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
br_vlan_rht_params);
__vlan_del_list(v);
- kfree_rcu(v, rcu);
+ call_rcu(&v->rcu, nbp_vlan_rcu_free);
}
br_vlan_put_master(masterv);
@@ -830,6 +855,30 @@ int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
return 0;
}
+int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
+{
+ struct net_bridge_port *p;
+
+ /* only allow changing the option when no port VLANs are configured */
+ list_for_each_entry(p, &br->port_list, list) {
+ struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+
+ if (vg->num_vlans)
+ return -EBUSY;
+ }
+
+ switch (val) {
+ case 0:
+ case 1:
+ br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
struct net_bridge_vlan *v;
diff --git a/net/core/dev.c b/net/core/dev.c
index ec96f50b0782..022ad73d6253 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1752,6 +1752,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
+/**
+ * call_netdevice_notifiers_mtu - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ * @arg: additional u32 argument passed to the notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+ struct net_device *dev, u32 arg)
+{
+ struct netdev_notifier_info_ext info = {
+ .info.dev = dev,
+ .ext.mtu = arg,
+ };
+
+ BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+ return call_netdevice_notifiers_info(val, &info.info);
+}
+
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
@@ -1954,6 +1976,17 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
return false;
}
+/**
+ * dev_nit_active - return true if any network interface taps are in use
+ *
+ * @dev: network device to check for the presence of taps
+ */
+bool dev_nit_active(struct net_device *dev)
+{
+ return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
+}
+EXPORT_SYMBOL_GPL(dev_nit_active);
+
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
@@ -3211,7 +3244,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
unsigned int len;
int rc;
- if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
+ if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
len = skb->len;
@@ -7589,14 +7622,16 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
err = __dev_set_mtu(dev, new_mtu);
if (!err) {
- err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ orig_mtu);
err = notifier_to_errno(err);
if (err) {
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
__dev_set_mtu(dev, orig_mtu);
- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ new_mtu);
}
}
return err;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 6dae81d65d5c..3a4b29a13d31 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -3012,6 +3012,8 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
struct genl_info *info,
union devlink_param_value *value)
{
+ int len;
+
if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
!info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
return -EINVAL;
@@ -3027,10 +3029,13 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
break;
case DEVLINK_PARAM_TYPE_STRING:
- if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) >
- DEVLINK_PARAM_MAX_STRING_VALUE)
+ len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
+ nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+ if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+ len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
return -EINVAL;
- value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ strcpy(value->vstr,
+ nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
break;
case DEVLINK_PARAM_TYPE_BOOL:
value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
@@ -3117,7 +3122,10 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
return -EOPNOTSUPP;
if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
- param_item->driverinit_value = value;
+ if (param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, value.vstr);
+ else
+ param_item->driverinit_value = value;
param_item->driverinit_value_valid = true;
} else {
if (!param->set)
@@ -4557,7 +4565,10 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
DEVLINK_PARAM_CMODE_DRIVERINIT))
return -EOPNOTSUPP;
- *init_val = param_item->driverinit_value;
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(init_val->vstr, param_item->driverinit_value.vstr);
+ else
+ *init_val = param_item->driverinit_value;
return 0;
}
@@ -4588,7 +4599,10 @@ int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
DEVLINK_PARAM_CMODE_DRIVERINIT))
return -EOPNOTSUPP;
- param_item->driverinit_value = init_val;
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+ else
+ param_item->driverinit_value = init_val;
param_item->driverinit_value_valid = true;
devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
@@ -4621,6 +4635,23 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
EXPORT_SYMBOL_GPL(devlink_param_value_changed);
/**
+ * devlink_param_value_str_fill - Safely fill the string, preventing
+ * overflow of the preallocated buffer
+ *
+ * @dst_val: destination devlink_param_value
+ * @src: source buffer
+ */
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src)
+{
+ size_t len;
+
+ len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE);
+ WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE);
+}
+EXPORT_SYMBOL_GPL(devlink_param_value_str_fill);
+
+/**
* devlink_region_create - create a new address region
*
* @devlink: devlink
diff --git a/net/core/filter.c b/net/core/filter.c
index 2e1476536b92..1a3ac6c46873 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3802,8 +3802,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
- case SO_MAX_PACING_RATE:
- sk->sk_max_pacing_rate = val;
+ case SO_MAX_PACING_RATE: /* 32bit version */
+ sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
break;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index dc1389b8beb1..69c41cb3966d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -232,7 +232,8 @@ static void pneigh_queue_purge(struct sk_buff_head *list)
}
}
-static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
+static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
+ bool skip_perm)
{
int i;
struct neigh_hash_table *nht;
@@ -250,6 +251,10 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
np = &n->next;
continue;
}
+ if (skip_perm && n->nud_state & NUD_PERMANENT) {
+ np = &n->next;
+ continue;
+ }
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
@@ -285,21 +290,35 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
- neigh_flush_dev(tbl, dev);
+ neigh_flush_dev(tbl, dev, false);
write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
-int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+ bool skip_perm)
{
write_lock_bh(&tbl->lock);
- neigh_flush_dev(tbl, dev);
+ neigh_flush_dev(tbl, dev, skip_perm);
pneigh_ifdown_and_unlock(tbl, dev);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
return 0;
}
+
+int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
+{
+ __neigh_ifdown(tbl, dev, true);
+ return 0;
+}
+EXPORT_SYMBOL(neigh_carrier_down);
+
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+ __neigh_ifdown(tbl, dev, false);
+ return 0;
+}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index c894c4af8981..0958c7be2c22 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -59,7 +59,7 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
-#define RTNL_MAX_TYPE 48
+#define RTNL_MAX_TYPE 49
#define RTNL_SLAVE_MAX_TYPE 36
struct rtnl_link {
@@ -3857,7 +3857,6 @@ static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
int *br_idx, int *brport_idx,
struct netlink_ext_ack *extack)
{
- struct ifinfomsg *ifm = nlmsg_data(nlh);
struct nlattr *tb[IFLA_MAX+1];
int err;
@@ -3871,6 +3870,8 @@ static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
(nlmsg_len(nlh) != sizeof(struct ndmsg) +
nla_attr_size(sizeof(u32)))) {
+ struct ifinfomsg *ifm;
+
err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
ifla_policy, extack);
if (err < 0) {
@@ -3880,6 +3881,7 @@ static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
*br_idx = nla_get_u32(tb[IFLA_MASTER]);
}
+ ifm = nlmsg_data(nlh);
*brport_idx = ifm->ifi_index;
}
return 0;
@@ -4775,8 +4777,8 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
ifsm = nlmsg_data(cb->nlh);
- /* only requests using NLM_F_DUMP_PROPER_HDR can pass data to
- * influence the dump. The legacy exception is filter_mask.
+ /* only requests using strict checks can pass data to influence
+ * the dump. The legacy exception is filter_mask.
*/
if (cb->strict_check) {
if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0e937d3d85b5..54b961de9538 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4394,14 +4394,16 @@ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
*/
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
- if (unlikely(start > skb_headlen(skb)) ||
- unlikely((int)start + off > skb_headlen(skb) - 2)) {
- net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
- start, off, skb_headlen(skb));
+ u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
+ u32 csum_start = skb_headroom(skb) + (u32)start;
+
+ if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
+ net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
+ start, off, skb_headroom(skb), skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_headroom(skb) + start;
+ skb->csum_start = csum_start;
skb->csum_offset = off;
skb_set_transport_header(skb, start);
return true;
diff --git a/net/core/sock.c b/net/core/sock.c
index 52e4f1c16b1e..6fcc4bc07d19 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -998,7 +998,7 @@ set_rcvbuf:
cmpxchg(&sk->sk_pacing_status,
SK_PACING_NONE,
SK_PACING_NEEDED);
- sk->sk_max_pacing_rate = val;
+ sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
break;
@@ -1336,7 +1336,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
#endif
case SO_MAX_PACING_RATE:
- v.val = sk->sk_max_pacing_rate;
+ /* 32bit version */
+ v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
break;
case SO_INCOMING_CPU:
@@ -2749,8 +2750,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
- sk->sk_max_pacing_rate = ~0U;
- sk->sk_pacing_rate = ~0U;
+ sk->sk_max_pacing_rate = ~0UL;
+ sk->sk_pacing_rate = ~0UL;
sk->sk_pacing_shift = 10;
sk->sk_incoming_cpu = -1;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index e90c89ef8c08..850a6f13a082 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1255,6 +1255,8 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event,
change_info = ptr;
if (change_info->flags_changed & IFF_NOARP)
neigh_changeaddr(&arp_tbl, dev);
+ if (!netif_carrier_ok(dev))
+ neigh_carrier_down(&arp_tbl, dev);
break;
default:
break;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 038f511c73fa..0f1beceb47d5 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1291,7 +1291,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *upper_info = ptr;
+ struct netdev_notifier_info_ext *info_ext = ptr;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
@@ -1326,16 +1327,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
- /* fall through */
+ rt_cache_flush(net);
+ break;
case NETDEV_CHANGEMTU:
+ fib_sync_mtu(dev, info_ext->ext.mtu);
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
- info = ptr;
+ upper_info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
- if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ if (upper_info->upper_dev &&
+ netif_is_l3_master(upper_info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f8c7ec8171a8..b5c3937ca6ec 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1457,6 +1457,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
return NOTIFY_DONE;
}
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ * larger MTUs on the path. With that limit raised, we can now
+ * discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+ struct fnhe_hash_bucket *bucket;
+ int i;
+
+ bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+ if (!bucket)
+ return;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+ fnhe;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+ if (fnhe->fnhe_mtu_locked) {
+ if (new <= fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ fnhe->fnhe_mtu_locked = false;
+ }
+ } else if (new < fnhe->fnhe_pmtu ||
+ orig == fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ }
+ }
+ }
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+ unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+ struct hlist_head *head = &fib_info_devhash[hash];
+ struct fib_nh *nh;
+
+ hlist_for_each_entry(nh, head, nh_hash) {
+ if (nh->nh_dev == dev)
+ nh_update_mtu(nh, dev->mtu, orig_mtu);
+ }
+}
+
/* Event force Flags Description
* NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
* NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f71d2395c428..c0a9d26c06ce 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1001,21 +1001,22 @@ out: kfree_skb(skb);
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
+ u32 old_mtu = ipv4_mtu(dst);
struct fib_result res;
bool lock = false;
if (ip_mtu_locked(dst))
return;
- if (ipv4_mtu(dst) < mtu)
+ if (old_mtu < mtu)
return;
if (mtu < ip_rt_min_pmtu) {
lock = true;
- mtu = ip_rt_min_pmtu;
+ mtu = min(old_mtu, ip_rt_min_pmtu);
}
- if (rt->rt_pmtu == mtu &&
+ if (rt->rt_pmtu == mtu && !lock &&
time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
return;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 43ef83b2330e..b8ba8fa34eff 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3111,10 +3111,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
const struct inet_connection_sock *icsk = inet_csk(sk);
+ unsigned long rate;
u32 now;
u64 rate64;
bool slow;
- u32 rate;
memset(info, 0, sizeof(*info));
if (sk->sk_type != SOCK_STREAM)
@@ -3124,11 +3124,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
/* Report meaningful fields for all TCP states, including listeners */
rate = READ_ONCE(sk->sk_pacing_rate);
- rate64 = rate != ~0U ? rate : ~0ULL;
+ rate64 = (rate != ~0UL) ? rate : ~0ULL;
info->tcpi_pacing_rate = rate64;
rate = READ_ONCE(sk->sk_max_pacing_rate);
- rate64 = rate != ~0U ? rate : ~0ULL;
+ rate64 = (rate != ~0UL) ? rate : ~0ULL;
info->tcpi_max_pacing_rate = rate64;
info->tcpi_reordering = tp->reordering;
@@ -3254,8 +3254,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *stats;
struct tcp_info info;
+ unsigned long rate;
u64 rate64;
- u32 rate;
stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
if (!stats)
@@ -3274,7 +3274,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
tp->total_retrans, TCP_NLA_PAD);
rate = READ_ONCE(sk->sk_pacing_rate);
- rate64 = rate != ~0U ? rate : ~0ULL;
+ rate64 = (rate != ~0UL) ? rate : ~0ULL;
nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
rate64 = tcp_compute_delivery_rate(tp);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index a5786e3e2c16..b88081285fd1 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -129,7 +129,7 @@ static const u32 bbr_probe_rtt_mode_ms = 200;
static const int bbr_min_tso_rate = 1200000;
/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck. */
-static const int bbr_pacing_marging_percent = 1;
+static const int bbr_pacing_margin_percent = 1;
/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
* that will allow a smoothly increasing pacing rate that will double each RTT
@@ -214,12 +214,12 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
rate *= mss;
rate *= gain;
rate >>= BBR_SCALE;
- rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_marging_percent);
+ rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
return rate >> BW_SCALE;
}
/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
-static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
u64 rate = bw;
@@ -258,7 +258,7 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
+ unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
bbr_init_pacing_rate_from_rtt(sk);
@@ -280,7 +280,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
/* Sort of tcp_tso_autosize() but ignoring
* driver provided sk_gso_max_size.
*/
- bytes = min_t(u32, sk->sk_pacing_rate >> sk->sk_pacing_shift,
+ bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift,
GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 06fbe102a425..37eebd910396 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -146,7 +146,7 @@ static void tcp_cdg_hystart_update(struct sock *sk)
return;
if (hystart_detect & HYSTART_ACK_TRAIN) {
- u32 now_us = div_u64(local_clock(), NSEC_PER_USEC);
+ u32 now_us = tp->tcp_mstamp;
if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
ca->last_ack = now_us;
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index ca61e2a659e7..cd4814f7e962 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -44,6 +44,7 @@
#include <linux/mm.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
+#include "tcp_dctcp.h"
#define DCTCP_MAX_ALPHA 1024U
@@ -118,54 +119,6 @@ static u32 dctcp_ssthresh(struct sock *sk)
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
-/* Minimal DCTP CE state machine:
- *
- * S: 0 <- last pkt was non-CE
- * 1 <- last pkt was CE
- */
-
-static void dctcp_ce_state_0_to_1(struct sock *sk)
-{
- struct dctcp *ca = inet_csk_ca(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (!ca->ce_state) {
- /* State has changed from CE=0 to CE=1, force an immediate
- * ACK to reflect the new CE state. If an ACK was delayed,
- * send that first to reflect the prior CE state.
- */
- if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
- __tcp_send_ack(sk, ca->prior_rcv_nxt);
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
- }
-
- ca->prior_rcv_nxt = tp->rcv_nxt;
- ca->ce_state = 1;
-
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-}
-
-static void dctcp_ce_state_1_to_0(struct sock *sk)
-{
- struct dctcp *ca = inet_csk_ca(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (ca->ce_state) {
- /* State has changed from CE=1 to CE=0, force an immediate
- * ACK to reflect the new CE state. If an ACK was delayed,
- * send that first to reflect the prior CE state.
- */
- if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
- __tcp_send_ack(sk, ca->prior_rcv_nxt);
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
- }
-
- ca->prior_rcv_nxt = tp->rcv_nxt;
- ca->ce_state = 0;
-
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-}
-
static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -230,12 +183,12 @@ static void dctcp_state(struct sock *sk, u8 new_state)
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
+ struct dctcp *ca = inet_csk_ca(sk);
+
switch (ev) {
case CA_EVENT_ECN_IS_CE:
- dctcp_ce_state_0_to_1(sk);
- break;
case CA_EVENT_ECN_NO_CE:
- dctcp_ce_state_1_to_0(sk);
+ dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
break;
default:
/* Don't care for the rest. */
diff --git a/net/ipv4/tcp_dctcp.h b/net/ipv4/tcp_dctcp.h
new file mode 100644
index 000000000000..d69a77cbd0c7
--- /dev/null
+++ b/net/ipv4/tcp_dctcp.h
@@ -0,0 +1,40 @@
+#ifndef _TCP_DCTCP_H
+#define _TCP_DCTCP_H
+
+static inline void dctcp_ece_ack_cwr(struct sock *sk, u32 ce_state)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (ce_state == 1)
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ else
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+}
+
+/* Minimal DCTCP CE state machine:
+ *
+ * S: 0 <- last pkt was non-CE
+ * 1 <- last pkt was CE
+ */
+static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
+ u32 *prior_rcv_nxt, u32 *ce_state)
+{
+ u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
+
+ if (*ce_state != new_ce_state) {
+ /* CE state has changed, force an immediate ACK to
+ * reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
+ dctcp_ece_ack_cwr(sk, *ce_state);
+ __tcp_send_ack(sk, *prior_rcv_nxt);
+ }
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ }
+ *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
+ *ce_state = new_ce_state;
+ dctcp_ece_ack_cwr(sk, new_ce_state);
+}
+
+#endif
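
The new header above factors the CE state machine out of tcp_dctcp.c so other congestion modules can share it. A self-contained C model of the trigger logic, with the socket internals stubbed out:

#include <stdint.h>
#include <stdio.h>

enum ca_event { ECN_IS_CE, ECN_NO_CE };

struct ce_tracker {
	uint32_t prior_rcv_nxt;
	uint32_t ce_state;	/* 0: last pkt non-CE, 1: last pkt CE */
};

/* Returns 1 when the CE transition demands an immediate ACK. */
static int ce_ack_update(struct ce_tracker *t, enum ca_event evt,
			 uint32_t rcv_nxt)
{
	uint32_t new_state = (evt == ECN_IS_CE);
	int force_ack = (t->ce_state != new_state);

	t->prior_rcv_nxt = rcv_nxt;
	t->ce_state = new_state;
	return force_ack;
}

int main(void)
{
	struct ce_tracker t = { 0, 0 };
	enum ca_event trace[] = { ECN_NO_CE, ECN_IS_CE, ECN_IS_CE, ECN_NO_CE };

	for (unsigned int i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		int force = ce_ack_update(&t, trace[i], 1000 + i);

		printf("pkt %u: ce=%u force_ack=%d\n", i, t.ce_state, force);
	}
	return 0;
}
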
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 059b67af28b1..d212e4cbc689 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -52,9 +52,8 @@ void tcp_mstamp_refresh(struct tcp_sock *tp)
{
u64 val = tcp_clock_ns();
- /* departure time for next data packet */
- if (val > tp->tcp_wstamp_ns)
- tp->tcp_wstamp_ns = val;
+ if (val > tp->tcp_clock_cache)
+ tp->tcp_clock_cache = val;
val = div_u64(val, NSEC_PER_USEC);
if (val > tp->tcp_mstamp)
@@ -976,32 +975,26 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void tcp_internal_pacing(struct sock *sk)
-{
- if (!tcp_needs_internal_pacing(sk))
- return;
- hrtimer_start(&tcp_sk(sk)->pacing_timer,
- ns_to_ktime(tcp_sk(sk)->tcp_wstamp_ns),
- HRTIMER_MODE_ABS_PINNED_SOFT);
- sock_hold(sk);
-}
-
-static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb)
+static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
+ u64 prior_wstamp)
{
struct tcp_sock *tp = tcp_sk(sk);
skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
if (sk->sk_pacing_status != SK_PACING_NONE) {
- u32 rate = sk->sk_pacing_rate;
+ unsigned long rate = sk->sk_pacing_rate;
/* Original sch_fq does not pace first 10 MSS
* Note that tp->data_segs_out overflows after 2^32 packets,
* this is a minor annoyance.
*/
- if (rate != ~0U && rate && tp->data_segs_out >= 10) {
- tp->tcp_wstamp_ns += div_u64((u64)skb->len * NSEC_PER_SEC, rate);
+ if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
+ u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
+ u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
- tcp_internal_pacing(sk);
+ /* take into account OS jitter */
+ len_ns -= min_t(u64, len_ns / 2, credit);
+ tp->tcp_wstamp_ns += len_ns;
}
}
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
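
The reworked tcp_update_skb_after_send() above computes the next earliest departure time: the packet's serialization delay is added to tcp_wstamp_ns, minus up to half of that delay as credit for time already elapsed (OS jitter). A plain C sketch of the computation; the sample packet size and rate are illustrative:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t next_wstamp(uint64_t wstamp_ns, uint64_t prior_wstamp,
			    uint32_t len, unsigned long rate)
{
	uint64_t len_ns = (uint64_t)len * NSEC_PER_SEC / rate;
	uint64_t credit = wstamp_ns - prior_wstamp;

	/* take into account OS jitter: cap the credit at half the delay */
	if (credit > len_ns / 2)
		credit = len_ns / 2;
	return wstamp_ns + len_ns - credit;
}

int main(void)
{
	/* 1448-byte packet at 100 MB/s: ~14.48 us of serialization time */
	uint64_t now = 1000;

	printf("next departure in %llu ns\n",
	       (unsigned long long)(next_wstamp(now, now, 1448, 100000000)
				    - now));
	return 0;
}
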
@@ -1030,6 +1023,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
struct sk_buff *oskb = NULL;
struct tcp_md5sig_key *md5;
struct tcphdr *th;
+ u64 prior_wstamp;
int err;
BUG_ON(!skb || !tcp_skb_pcount(skb));
@@ -1050,6 +1044,10 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
if (unlikely(!skb))
return -ENOBUFS;
}
+
+ prior_wstamp = tp->tcp_wstamp_ns;
+ tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
+
skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
inet = inet_sk(sk);
@@ -1166,7 +1164,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
err = net_xmit_eval(err);
}
if (!err && oskb) {
- tcp_update_skb_after_send(sk, oskb);
+ tcp_update_skb_after_send(sk, oskb, prior_wstamp);
tcp_rate_skb_sent(sk, oskb);
}
return err;
@@ -1701,8 +1699,9 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
{
u32 bytes, segs;
- bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift,
- sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
+ bytes = min_t(unsigned long,
+ sk->sk_pacing_rate >> sk->sk_pacing_shift,
+ sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
/* Goal is to send at least one packet per ms,
* not one big TSO packet every 100 ms.
@@ -2175,10 +2174,23 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}
-static bool tcp_pacing_check(const struct sock *sk)
+static bool tcp_pacing_check(struct sock *sk)
{
- return tcp_needs_internal_pacing(sk) &&
- hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!tcp_needs_internal_pacing(sk))
+ return false;
+
+ if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
+ return false;
+
+ if (!hrtimer_is_queued(&tp->pacing_timer)) {
+ hrtimer_start(&tp->pacing_timer,
+ ns_to_ktime(tp->tcp_wstamp_ns),
+ HRTIMER_MODE_ABS_PINNED_SOFT);
+ sock_hold(sk);
+ }
+ return true;
}
/* TCP Small Queues :
@@ -2195,10 +2207,12 @@ static bool tcp_pacing_check(const struct sock *sk)
static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
unsigned int factor)
{
- unsigned int limit;
+ unsigned long limit;
- limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
- limit = min_t(u32, limit,
+ limit = max_t(unsigned long,
+ 2 * skb->truesize,
+ sk->sk_pacing_rate >> sk->sk_pacing_shift);
+ limit = min_t(unsigned long, limit,
sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
limit <<= factor;
@@ -2315,7 +2329,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
/* "skb_mstamp" is used as a start point for the retransmit timer */
- tcp_update_skb_after_send(sk, skb);
+ tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
goto repair; /* Skip network transmission */
}
@@ -2890,7 +2904,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
} tcp_skb_tsorted_restore(skb);
if (!err) {
- tcp_update_skb_after_send(sk, skb);
+ tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
tcp_rate_skb_sent(sk, skb);
}
} else {
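
tcp_pacing_check(), reworked earlier in this file's hunks, now both gates transmission on the departure time and arms the pacing hrtimer on the first gated attempt. A userspace model of that gate, with the kernel timer and socket types stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pacer {
	uint64_t wstamp_ns;	/* next allowed departure time */
	uint64_t clock_cache;	/* cached "now" */
	bool timer_queued;
};

static bool pacing_check(struct pacer *p)
{
	if (p->wstamp_ns <= p->clock_cache)
		return false;		/* not throttled: send now */
	if (!p->timer_queued) {
		/* stands in for hrtimer_start() on the first gated check */
		p->timer_queued = true;
		printf("timer armed for t=%llu\n",
		       (unsigned long long)p->wstamp_ns);
	}
	return true;			/* caller must defer transmission */
}

int main(void)
{
	struct pacer p = { .wstamp_ns = 2000, .clock_cache = 1000 };
	bool first = pacing_check(&p);
	bool second = pacing_check(&p);	/* timer already queued */

	printf("gated=%d gated=%d\n", first, second);
	return 0;
}
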
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 61023d50cd60..676020663ce8 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -360,7 +360,7 @@ static void tcp_probe_timer(struct sock *sk)
*/
start_ts = tcp_skb_timestamp(skb);
if (!start_ts)
- skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
+ skb->skb_mstamp_ns = tp->tcp_clock_cache;
else if (icsk->icsk_user_timeout &&
(s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
goto abort;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1bec2203d558..cf8252d05a01 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1627,7 +1627,7 @@ busy_check:
*err = error;
return NULL;
}
-EXPORT_SYMBOL_GPL(__skb_recv_udp);
+EXPORT_SYMBOL(__skb_recv_udp);
/*
* This should be easy, if there is something there we
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e14d244c551f..0783af11b0b7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -47,6 +47,7 @@ struct fib6_cleaner {
int (*func)(struct fib6_info *, void *arg);
int sernum;
void *arg;
+ bool skip_notify;
};
#ifdef CONFIG_IPV6_SUBTREES
@@ -194,6 +195,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
*ppcpu_rt = NULL;
}
}
+
+ free_percpu(f6i->rt6i_pcpu);
}
lwtstate_put(f6i->fib6_nh.nh_lwtstate);
@@ -1956,6 +1959,7 @@ static int fib6_clean_node(struct fib6_walker *w)
struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w);
struct nl_info info = {
.nl_net = c->net,
+ .skip_notify = c->skip_notify,
};
if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
@@ -2007,7 +2011,7 @@ static int fib6_clean_node(struct fib6_walker *w)
static void fib6_clean_tree(struct net *net, struct fib6_node *root,
int (*func)(struct fib6_info *, void *arg),
- int sernum, void *arg)
+ int sernum, void *arg, bool skip_notify)
{
struct fib6_cleaner c;
@@ -2019,13 +2023,14 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
c.sernum = sernum;
c.arg = arg;
c.net = net;
+ c.skip_notify = skip_notify;
fib6_walk(net, &c.w);
}
static void __fib6_clean_all(struct net *net,
int (*func)(struct fib6_info *, void *),
- int sernum, void *arg)
+ int sernum, void *arg, bool skip_notify)
{
struct fib6_table *table;
struct hlist_head *head;
@@ -2037,7 +2042,7 @@ static void __fib6_clean_all(struct net *net,
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
spin_lock_bh(&table->tb6_lock);
fib6_clean_tree(net, &table->tb6_root,
- func, sernum, arg);
+ func, sernum, arg, skip_notify);
spin_unlock_bh(&table->tb6_lock);
}
}
@@ -2047,14 +2052,21 @@ static void __fib6_clean_all(struct net *net,
void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *),
void *arg)
{
- __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg);
+ __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, false);
+}
+
+void fib6_clean_all_skip_notify(struct net *net,
+ int (*func)(struct fib6_info *, void *),
+ void *arg)
+{
+ __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, true);
}
static void fib6_flush_trees(struct net *net)
{
int new_sernum = fib6_new_sernum(net);
- __fib6_clean_all(net, NULL, new_sernum, NULL);
+ __fib6_clean_all(net, NULL, new_sernum, NULL, false);
}
/*
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 51863ada15a4..a25cfdd47c89 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1784,6 +1784,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
change_info = ptr;
if (change_info->flags_changed & IFF_NOARP)
neigh_changeaddr(&nd_tbl, dev);
+ if (!netif_carrier_ok(dev))
+ neigh_carrier_down(&nd_tbl, dev);
break;
case NETDEV_DOWN:
neigh_ifdown(&nd_tbl, dev);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7c38e0e058ae..f4e08b0689a8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4026,8 +4026,12 @@ void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
.event = event,
},
};
+ struct net *net = dev_net(dev);
- fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
+ if (net->ipv6.sysctl.skip_notify_on_dev_down)
+ fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
+ else
+ fib6_clean_all(net, fib6_ifdown, &arg);
}
void rt6_disable_ip(struct net_device *dev, unsigned long event)
@@ -5031,7 +5035,10 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
return 0;
}
-struct ctl_table ipv6_route_table_template[] = {
+static int zero;
+static int one = 1;
+
+static struct ctl_table ipv6_route_table_template[] = {
{
.procname = "flush",
.data = &init_net.ipv6.sysctl.flush_delay,
@@ -5102,6 +5109,15 @@ struct ctl_table ipv6_route_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
+ {
+ .procname = "skip_notify_on_dev_down",
+ .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
{ }
};
@@ -5125,6 +5141,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
+ table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
@@ -5189,6 +5206,7 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
+ net->ipv6.sysctl.skip_notify_on_dev_down = 0;
net->ipv6.ip6_rt_gc_expire = 30*HZ;
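
The new sysctl registered above lives under the net.ipv6.route hierarchy. A small userspace sketch of toggling it and reading it back; the /proc path is inferred from the template table, adjust if your tree differs:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/proc/sys/net/ipv6/route/skip_notify_on_dev_down";
	FILE *f = fopen(path, "w");
	int val;

	if (f) {
		fputs("1\n", f);	/* suppress RTM_DELROUTE on dev down */
		fclose(f);
	}
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%d", &val) == 1)
			printf("%s = %d\n", path, val);
		fclose(f);
	}
	return 0;
}
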
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 76e30f4797fb..f869e35d0974 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -27,20 +27,6 @@ config MAC80211_RC_MINSTREL
---help---
This option enables the 'minstrel' TX rate control algorithm
-config MAC80211_RC_MINSTREL_HT
- bool "Minstrel 802.11n support" if EXPERT
- depends on MAC80211_RC_MINSTREL
- default y
- ---help---
- This option enables the 'minstrel_ht' TX rate control algorithm
-
-config MAC80211_RC_MINSTREL_VHT
- bool "Minstrel 802.11ac support" if EXPERT
- depends on MAC80211_RC_MINSTREL_HT
- default n
- ---help---
- This option enables VHT in the 'minstrel_ht' TX rate control algorithm
-
choice
prompt "Default rate control algorithm"
depends on MAC80211_HAS_RC
@@ -62,8 +48,7 @@ endchoice
config MAC80211_RC_DEFAULT
string
- default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT
- default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL
+ default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL
default ""
endif
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index bb707789ef2b..4f03ebe732fa 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -53,13 +53,14 @@ mac80211-$(CONFIG_PM) += pm.o
CFLAGS_trace.o := -I$(src)
-rc80211_minstrel-y := rc80211_minstrel.o
-rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o
+rc80211_minstrel-y := \
+ rc80211_minstrel.o \
+ rc80211_minstrel_ht.o
-rc80211_minstrel_ht-y := rc80211_minstrel_ht.o
-rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o
+rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += \
+ rc80211_minstrel_debugfs.o \
+ rc80211_minstrel_ht_debugfs.o
mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
-mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
ccflags-y += -DDEBUG
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 914aef7e7afd..51622333d460 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -790,6 +790,48 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int ieee80211_set_ftm_responder_params(
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *lci, size_t lci_len,
+ const u8 *civicloc, size_t civicloc_len)
+{
+ struct ieee80211_ftm_responder_params *new, *old;
+ struct ieee80211_bss_conf *bss_conf;
+ u8 *pos;
+ int len;
+
+ if ((!lci || !lci_len) && (!civicloc || !civicloc_len))
+ return 1;
+
+ bss_conf = &sdata->vif.bss_conf;
+ old = bss_conf->ftmr_params;
+ len = lci_len + civicloc_len;
+
+ new = kzalloc(sizeof(*new) + len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ pos = (u8 *)(new + 1);
+ if (lci_len) {
+ new->lci_len = lci_len;
+ new->lci = pos;
+ memcpy(pos, lci, lci_len);
+ pos += lci_len;
+ }
+
+ if (civicloc_len) {
+ new->civicloc_len = civicloc_len;
+ new->civicloc = pos;
+ memcpy(pos, civicloc, civicloc_len);
+ pos += civicloc_len;
+ }
+
+ bss_conf->ftmr_params = new;
+ kfree(old);
+
+ return 0;
+}
+
static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
struct cfg80211_beacon_data *params,
const struct ieee80211_csa_settings *csa)
@@ -863,6 +905,20 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
if (err == 0)
changed |= BSS_CHANGED_AP_PROBE_RESP;
+ if (params->ftm_responder != -1) {
+ sdata->vif.bss_conf.ftm_responder = params->ftm_responder;
+ err = ieee80211_set_ftm_responder_params(sdata,
+ params->lci,
+ params->lci_len,
+ params->civicloc,
+ params->civicloc_len);
+
+ if (err < 0)
+ return err;
+
+ changed |= BSS_CHANGED_FTM_RESPONDER;
+ }
+
rcu_assign_pointer(sdata->u.ap.beacon, new);
if (old)
@@ -1063,6 +1119,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree_rcu(old_probe_resp, rcu_head);
sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
+ kfree(sdata->vif.bss_conf.ftmr_params);
+ sdata->vif.bss_conf.ftmr_params = NULL;
+
__sta_info_flush(sdata, true);
ieee80211_free_keys(sdata, true);
@@ -2875,6 +2934,20 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
pos += beacon->probe_resp_len;
}
+ if (beacon->ftm_responder)
+ new_beacon->ftm_responder = beacon->ftm_responder;
+ if (beacon->lci) {
+ new_beacon->lci_len = beacon->lci_len;
+ new_beacon->lci = pos;
+ memcpy(pos, beacon->lci, beacon->lci_len);
+ pos += beacon->lci_len;
+ }
+ if (beacon->civicloc) {
+ new_beacon->civicloc_len = beacon->civicloc_len;
+ new_beacon->civicloc = pos;
+ memcpy(pos, beacon->civicloc, beacon->civicloc_len);
+ pos += beacon->civicloc_len;
+ }
return new_beacon;
}
@@ -3765,6 +3838,17 @@ out:
return ret;
}
+static int
+ieee80211_get_ftm_responder_stats(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ return drv_get_ftm_responder_stats(local, sdata, ftm_stats);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3859,4 +3943,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.set_multicast_to_unicast = ieee80211_set_multicast_to_unicast,
.tx_control_port = ieee80211_tx_control_port,
.get_txq_stats = ieee80211_get_txq_stats,
+ .get_ftm_responder_stats = ieee80211_get_ftm_responder_stats,
};
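
ieee80211_set_ftm_responder_params() above uses a single allocation for the struct plus both variable-length payloads, pointing lci/civicloc into the tail so one kfree() releases everything. A userspace model of that pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ftm_params {
	const unsigned char *lci;
	const unsigned char *civicloc;
	size_t lci_len, civicloc_len;
};

static struct ftm_params *ftm_params_alloc(const void *lci, size_t lci_len,
					   const void *civ, size_t civ_len)
{
	struct ftm_params *p;
	unsigned char *pos;

	/* one block: the struct followed by both payloads */
	p = calloc(1, sizeof(*p) + lci_len + civ_len);
	if (!p)
		return NULL;

	pos = (unsigned char *)(p + 1);
	if (lci_len) {
		p->lci = pos;
		p->lci_len = lci_len;
		memcpy(pos, lci, lci_len);
		pos += lci_len;
	}
	if (civ_len) {
		p->civicloc = pos;
		p->civicloc_len = civ_len;
		memcpy(pos, civ, civ_len);
	}
	return p;	/* released with a single free() */
}

int main(void)
{
	struct ftm_params *p = ftm_params_alloc("LCI", 3, "CIVIC", 5);

	if (p)
		printf("lci_len=%zu civicloc_len=%zu\n",
		       p->lci_len, p->civicloc_len);
	free(p);
	return 0;
}
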
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index e42c641b6190..0b1747a2313d 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1183,6 +1183,22 @@ static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local,
return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb);
}
+static inline int
+drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+ &sdata->vif,
+ ftm_stats);
+ trace_drv_get_ftm_responder_stats(local, sdata, ftm_stats);
+
+ return ret;
+}
+
static inline int drv_start_nan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_nan_conf *conf)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f40a2167935f..10a05062e4a0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -377,6 +377,7 @@ struct ieee80211_mgd_auth_data {
u8 key[WLAN_KEY_LEN_WEP104];
u8 key_len, key_idx;
bool done;
+ bool peer_confirmed;
bool timeout_started;
u16 sae_trans, sae_status;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 77381017bac7..83e71e6b2ebe 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1203,8 +1203,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
continue;
sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL);
- if (!sband)
+ if (!sband) {
+ result = -ENOMEM;
goto fail_rate;
+ }
wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n",
band);
@@ -1373,18 +1375,12 @@ static int __init ieee80211_init(void)
if (ret)
return ret;
- ret = rc80211_minstrel_ht_init();
- if (ret)
- goto err_minstrel;
-
ret = ieee80211_iface_init();
if (ret)
goto err_netdev;
return 0;
err_netdev:
- rc80211_minstrel_ht_exit();
- err_minstrel:
rc80211_minstrel_exit();
return ret;
@@ -1392,7 +1388,6 @@ static int __init ieee80211_init(void)
static void __exit ieee80211_exit(void)
{
- rc80211_minstrel_ht_exit();
rc80211_minstrel_exit();
ieee80211s_stop();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 89dac799a85f..d2bc8d57c87e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2761,13 +2761,40 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
auth_data->key_idx, tx_flags);
}
+static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct sta_info *sta;
+
+ sdata_info(sdata, "authenticated\n");
+ ifmgd->auth_data->done = true;
+ ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
+ ifmgd->auth_data->timeout_started = true;
+ run_again(sdata, ifmgd->auth_data->timeout);
+
+ /* move station state to auth */
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get(sdata, bssid);
+ if (!sta) {
+ WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
+ mutex_unlock(&sdata->local->sta_mtx);
+ return false;
+ }
+ if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
+ sdata_info(sdata, "failed moving %pM to auth\n", bssid);
+ mutex_unlock(&sdata->local->sta_mtx);
+ return false;
+ }
+ mutex_unlock(&sdata->local->sta_mtx);
+
+ return true;
+}
+
static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 bssid[ETH_ALEN];
u16 auth_alg, auth_transaction, status_code;
- struct sta_info *sta;
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = AUTH_EVENT,
@@ -2791,7 +2818,11 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
status_code = le16_to_cpu(mgmt->u.auth.status_code);
if (auth_alg != ifmgd->auth_data->algorithm ||
- auth_transaction != ifmgd->auth_data->expected_transaction) {
+ (auth_alg != WLAN_AUTH_SAE &&
+ auth_transaction != ifmgd->auth_data->expected_transaction) ||
+ (auth_alg == WLAN_AUTH_SAE &&
+ (auth_transaction < ifmgd->auth_data->expected_transaction ||
+ auth_transaction > 2))) {
sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n",
mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
auth_transaction,
@@ -2834,35 +2865,17 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
event.u.mlme.status = MLME_SUCCESS;
drv_event_callback(sdata->local, sdata, &event);
- sdata_info(sdata, "authenticated\n");
- ifmgd->auth_data->done = true;
- ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
- ifmgd->auth_data->timeout_started = true;
- run_again(sdata, ifmgd->auth_data->timeout);
-
- if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
- ifmgd->auth_data->expected_transaction != 2) {
- /*
- * Report auth frame to user space for processing since another
- * round of Authentication frames is still needed.
- */
- cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
- return;
+ if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE ||
+ (auth_transaction == 2 &&
+ ifmgd->auth_data->expected_transaction == 2)) {
+ if (!ieee80211_mark_sta_auth(sdata, bssid))
+ goto out_err;
+ } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
+ auth_transaction == 2) {
+ sdata_info(sdata, "SAE peer confirmed\n");
+ ifmgd->auth_data->peer_confirmed = true;
}
- /* move station state to auth */
- mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get(sdata, bssid);
- if (!sta) {
- WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
- goto out_err;
- }
- if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
- sdata_info(sdata, "failed moving %pM to auth\n", bssid);
- goto out_err;
- }
- mutex_unlock(&sdata->local->sta_mtx);
-
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
return;
out_err:
@@ -4878,6 +4891,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_auth_data *auth_data;
u16 auth_alg;
int err;
+ bool cont_auth;
/* prepare auth data structure */
@@ -4912,6 +4926,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return -EOPNOTSUPP;
}
+ if (ifmgd->assoc_data)
+ return -EBUSY;
+
auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
req->ie_len, GFP_KERNEL);
if (!auth_data)
@@ -4931,6 +4948,13 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
auth_data->data_len += req->auth_data_len - 4;
}
+ /* Check whether we are continuing authentication with the same BSS
+ * that we were already in the process of authenticating with, so we
+ * can avoid removing and re-adding the STA entry in
+ * ieee80211_prep_connection().
+ */
+ cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss;
+
if (req->ie && req->ie_len) {
memcpy(&auth_data->data[auth_data->data_len],
req->ie, req->ie_len);
@@ -4947,18 +4971,26 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
/* try to authenticate/probe */
- if ((ifmgd->auth_data && !ifmgd->auth_data->done) ||
- ifmgd->assoc_data) {
- err = -EBUSY;
- goto err_free;
+ if (ifmgd->auth_data) {
+ if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) {
+ auth_data->peer_confirmed =
+ ifmgd->auth_data->peer_confirmed;
+ }
+ ieee80211_destroy_auth_data(sdata, cont_auth);
}
- if (ifmgd->auth_data)
- ieee80211_destroy_auth_data(sdata, false);
-
/* prep auth_data so we don't go into idle on disassoc */
ifmgd->auth_data = auth_data;
+ /* If this is a continuation of an ongoing SAE authentication exchange
+ * (i.e., a request to send SAE Confirm) and the peer has already
+ * confirmed, mark authentication completed since we are about to send
+ * out SAE Confirm.
+ */
+ if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE &&
+ auth_data->peer_confirmed && auth_data->sae_trans == 2)
+ ieee80211_mark_sta_auth(sdata, req->bss->bssid);
+
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -4976,7 +5008,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
- err = ieee80211_prep_connection(sdata, req->bss, false, false);
+ err = ieee80211_prep_connection(sdata, req->bss, cont_auth, false);
if (err)
goto err_clear;
@@ -4997,7 +5029,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
mutex_unlock(&sdata->local->mtx);
- err_free:
kfree(auth_data);
return err;
}
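
The mlme.c changes above relax the transaction check for SAE: any transaction number from the expected one up to 2 is accepted, while other algorithms still require an exact match. A compact C model of the acceptance predicate; WLAN_AUTH_SAE's value of 3 comes from IEEE 802.11:

#include <stdbool.h>
#include <stdio.h>

#define WLAN_AUTH_SAE 3	/* IEEE 802.11 authentication algorithm number */

static bool auth_frame_ok(int alg, int expected_alg,
			  unsigned int trans, unsigned int expected_trans)
{
	if (alg != expected_alg)
		return false;
	if (alg == WLAN_AUTH_SAE)	/* commit/confirm may repeat */
		return trans >= expected_trans && trans <= 2;
	return trans == expected_trans;	/* everyone else: exact match */
}

int main(void)
{
	/* SAE, expecting commit (1): commit and confirm pass, 3 does not */
	printf("%d %d %d\n",
	       auth_frame_ok(WLAN_AUTH_SAE, WLAN_AUTH_SAE, 1, 1),
	       auth_frame_ok(WLAN_AUTH_SAE, WLAN_AUTH_SAE, 2, 1),
	       auth_frame_ok(WLAN_AUTH_SAE, WLAN_AUTH_SAE, 3, 1));
	return 0;
}
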
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 8212bfeb71d6..d59198191a79 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -95,18 +95,5 @@ static inline void rc80211_minstrel_exit(void)
}
#endif
-#ifdef CONFIG_MAC80211_RC_MINSTREL_HT
-int rc80211_minstrel_ht_init(void);
-void rc80211_minstrel_ht_exit(void);
-#else
-static inline int rc80211_minstrel_ht_init(void)
-{
- return 0;
-}
-static inline void rc80211_minstrel_ht_exit(void)
-{
-}
-#endif
-
#endif /* IEEE80211_RATE_H */
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 07fb219327d6..a34e9c2ca626 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -167,12 +167,6 @@ minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
if (unlikely(!mrs->att_hist)) {
mrs->prob_ewma = cur_prob;
} else {
- /* update exponential weighted moving variance */
- mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv,
- cur_prob,
- mrs->prob_ewma,
- EWMA_LEVEL);
-
/* update exponential weighted moving average */
mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
cur_prob,
@@ -572,141 +566,6 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
minstrel_update_rates(mp, mi);
}
-static void *
-minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- struct ieee80211_supported_band *sband;
- struct minstrel_sta_info *mi;
- struct minstrel_priv *mp = priv;
- struct ieee80211_hw *hw = mp->hw;
- int max_rates = 0;
- int i;
-
- mi = kzalloc(sizeof(struct minstrel_sta_info), gfp);
- if (!mi)
- return NULL;
-
- for (i = 0; i < NUM_NL80211_BANDS; i++) {
- sband = hw->wiphy->bands[i];
- if (sband && sband->n_bitrates > max_rates)
- max_rates = sband->n_bitrates;
- }
-
- mi->r = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
- if (!mi->r)
- goto error;
-
- mi->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
- if (!mi->sample_table)
- goto error1;
-
- mi->last_stats_update = jiffies;
- return mi;
-
-error1:
- kfree(mi->r);
-error:
- kfree(mi);
- return NULL;
-}
-
-static void
-minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
-{
- struct minstrel_sta_info *mi = priv_sta;
-
- kfree(mi->sample_table);
- kfree(mi->r);
- kfree(mi);
-}
-
-static void
-minstrel_init_cck_rates(struct minstrel_priv *mp)
-{
- static const int bitrates[4] = { 10, 20, 55, 110 };
- struct ieee80211_supported_band *sband;
- u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
- int i, j;
-
- sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
- if (!sband)
- return;
-
- for (i = 0, j = 0; i < sband->n_bitrates; i++) {
- struct ieee80211_rate *rate = &sband->bitrates[i];
-
- if (rate->flags & IEEE80211_RATE_ERP_G)
- continue;
-
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
-
- for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
- if (rate->bitrate != bitrates[j])
- continue;
-
- mp->cck_rates[j] = i;
- break;
- }
- }
-}
-
-static void *
-minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- struct minstrel_priv *mp;
-
- mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
- if (!mp)
- return NULL;
-
- /* contention window settings
- * Just an approximation. Using the per-queue values would complicate
- * the calculations and is probably unnecessary */
- mp->cw_min = 15;
- mp->cw_max = 1023;
-
- /* number of packets (in %) to use for sampling other rates
- * sample less often for non-mrr packets, because the overhead
- * is much higher than with mrr */
- mp->lookaround_rate = 5;
- mp->lookaround_rate_mrr = 10;
-
- /* maximum time that the hw is allowed to stay in one MRR segment */
- mp->segment_size = 6000;
-
- if (hw->max_rate_tries > 0)
- mp->max_retry = hw->max_rate_tries;
- else
- /* safe default, does not necessarily have to match hw properties */
- mp->max_retry = 7;
-
- if (hw->max_rates >= 4)
- mp->has_mrr = true;
-
- mp->hw = hw;
- mp->update_interval = 100;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- mp->fixed_rate_idx = (u32) -1;
- mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
- 0666, debugfsdir, &mp->fixed_rate_idx);
-#endif
-
- minstrel_init_cck_rates(mp);
-
- return mp;
-}
-
-static void
-minstrel_free(void *priv)
-{
-#ifdef CONFIG_MAC80211_DEBUGFS
- debugfs_remove(((struct minstrel_priv *)priv)->dbg_fixed_rate);
-#endif
- kfree(priv);
-}
-
static u32 minstrel_get_expected_throughput(void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
@@ -725,29 +584,8 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
}
const struct rate_control_ops mac80211_minstrel = {
- .name = "minstrel",
.tx_status_ext = minstrel_tx_status,
.get_rate = minstrel_get_rate,
.rate_init = minstrel_rate_init,
- .alloc = minstrel_alloc,
- .free = minstrel_free,
- .alloc_sta = minstrel_alloc_sta,
- .free_sta = minstrel_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = minstrel_add_sta_debugfs,
- .remove_sta_debugfs = minstrel_remove_sta_debugfs,
-#endif
.get_expected_throughput = minstrel_get_expected_throughput,
};
-
-int __init
-rc80211_minstrel_init(void)
-{
- return ieee80211_rate_control_register(&mac80211_minstrel);
-}
-
-void
-rc80211_minstrel_exit(void)
-{
- ieee80211_rate_control_unregister(&mac80211_minstrel);
-}
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index be6c3f35f48b..23ec953e3a24 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -35,19 +35,6 @@ minstrel_ewma(int old, int new, int weight)
return old + incr;
}
-/*
- * Perform EWMV (Exponentially Weighted Moving Variance) calculation
- */
-static inline int
-minstrel_ewmv(int old_ewmv, int cur_prob, int prob_ewma, int weight)
-{
- int diff, incr;
-
- diff = cur_prob - prob_ewma;
- incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
- return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV;
-}
-
struct minstrel_rate_stats {
/* current / last sampling period attempts/success counters */
u16 attempts, last_attempts;
@@ -56,11 +43,8 @@ struct minstrel_rate_stats {
/* total attempts/success counters */
u32 att_hist, succ_hist;
- /* statistis of packet delivery probability
- * prob_ewma - exponential weighted moving average of prob
- * prob_ewmsd - exp. weighted moving standard deviation of prob */
+ /* prob_ewma - exponential weighted moving average of prob */
u16 prob_ewma;
- u16 prob_ewmv;
/* maximum retry counts */
u8 retry_count;
@@ -109,11 +93,6 @@ struct minstrel_sta_info {
/* sampling table */
u8 *sample_table;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *dbg_stats;
- struct dentry *dbg_stats_csv;
-#endif
};
struct minstrel_priv {
@@ -137,7 +116,6 @@ struct minstrel_priv {
* - setting will be applied on next update
*/
u32 fixed_rate_idx;
- struct dentry *dbg_fixed_rate;
#endif
};
@@ -146,17 +124,8 @@ struct minstrel_debugfs_info {
char buf[];
};
-/* Get EWMSD (Exponentially Weighted Moving Standard Deviation) * 10 */
-static inline int
-minstrel_get_ewmsd10(struct minstrel_rate_stats *mrs)
-{
- unsigned int ewmv = mrs->prob_ewmv;
- return int_sqrt(MINSTREL_TRUNC(ewmv * 1000 * 1000));
-}
-
extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
-void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
/* Recalculate success probabilities and counters for a given rate using EWMA */
void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
@@ -165,7 +134,5 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
/* debugfs */
int minstrel_stats_open(struct inode *inode, struct file *file);
int minstrel_stats_csv_open(struct inode *inode, struct file *file);
-ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
-int minstrel_stats_release(struct inode *inode, struct file *file);
#endif
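
With the EWMV helpers gone, the header keeps only the minstrel_ewma() average. A standalone model of that smoothing step; the EWMA_LEVEL/EWMA_DIV constants of 96 and 128 are assumed from the kernel header:

#include <stdio.h>

#define EWMA_LEVEL 96	/* smoothing weight, assumed kernel value */
#define EWMA_DIV   128

static int ewma(int old, int new_val, int weight)
{
	int diff = new_val - old;
	int incr = (EWMA_DIV - weight) * diff / EWMA_DIV;

	return old + incr;
}

int main(void)
{
	int avg = 0;
	int samples[] = { 1000, 1000, 0, 1000 };	/* prob, per-mille */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		avg = ewma(avg, samples[i], EWMA_LEVEL);
		printf("sample %u -> avg %d\n", i, avg);
	}
	return 0;
}
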
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 9ad7d63d3e5b..c8afd85b51a0 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -54,22 +54,6 @@
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
-ssize_t
-minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
-{
- struct minstrel_debugfs_info *ms;
-
- ms = file->private_data;
- return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
-}
-
-int
-minstrel_stats_release(struct inode *inode, struct file *file)
-{
- kfree(file->private_data);
- return 0;
-}
-
int
minstrel_stats_open(struct inode *inode, struct file *file)
{
@@ -86,14 +70,13 @@ minstrel_stats_open(struct inode *inode, struct file *file)
p = ms->buf;
p += sprintf(p, "\n");
p += sprintf(p,
- "best __________rate_________ ________statistics________ ____last_____ ______sum-of________\n");
+ "best __________rate_________ ____statistics___ ____last_____ ______sum-of________\n");
p += sprintf(p,
- "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n");
+ "rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n");
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
- unsigned int prob_ewmsd;
*(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
*(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -109,15 +92,13 @@ minstrel_stats_open(struct inode *inode, struct file *file)
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
" %3u %3u %-3u "
"%9llu %-9llu\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -135,14 +116,6 @@ minstrel_stats_open(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations minstrel_stat_fops = {
- .owner = THIS_MODULE,
- .open = minstrel_stats_open,
- .read = minstrel_stats_read,
- .release = minstrel_stats_release,
- .llseek = default_llseek,
-};
-
int
minstrel_stats_csv_open(struct inode *inode, struct file *file)
{
@@ -161,7 +134,6 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
- unsigned int prob_ewmsd;
p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : ""));
p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? "B" : ""));
@@ -177,14 +149,12 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u,"
"%llu,%llu,%d,%d\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -200,33 +170,3 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
return 0;
}
-
-static const struct file_operations minstrel_stat_csv_fops = {
- .owner = THIS_MODULE,
- .open = minstrel_stats_csv_open,
- .read = minstrel_stats_read,
- .release = minstrel_stats_release,
- .llseek = default_llseek,
-};
-
-void
-minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
-{
- struct minstrel_sta_info *mi = priv_sta;
-
- mi->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, mi,
- &minstrel_stat_fops);
-
- mi->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, mi,
- &minstrel_stat_csv_fops);
-}
-
-void
-minstrel_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct minstrel_sta_info *mi = priv_sta;
-
- debugfs_remove(mi->dbg_stats);
-
- debugfs_remove(mi->dbg_stats_csv);
-}
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 67ebdeaffbbc..f466ec37d161 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -52,22 +52,23 @@
_streams - 1
/* MCS rate information for an MCS group */
-#define MCS_GROUP(_streams, _sgi, _ht40) \
+#define MCS_GROUP(_streams, _sgi, _ht40, _s) \
[GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
+ .shift = _s, \
.flags = \
IEEE80211_TX_RC_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
- MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
} \
}
@@ -80,9 +81,10 @@
#define BW2VBPS(_bw, r3, r2, r1) \
(_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
-#define VHT_GROUP(_streams, _sgi, _bw) \
+#define VHT_GROUP(_streams, _sgi, _bw, _s) \
[VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
.streams = _streams, \
+ .shift = _s, \
.flags = \
IEEE80211_TX_RC_VHT_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
@@ -90,25 +92,25 @@
_bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)), \
+ BW2VBPS(_bw, 117, 54, 26)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 234, 108, 52)), \
+ BW2VBPS(_bw, 234, 108, 52)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 351, 162, 78)), \
+ BW2VBPS(_bw, 351, 162, 78)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 468, 216, 104)), \
+ BW2VBPS(_bw, 468, 216, 104)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 702, 324, 156)), \
+ BW2VBPS(_bw, 702, 324, 156)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 936, 432, 208)), \
+ BW2VBPS(_bw, 936, 432, 208)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1053, 486, 234)), \
+ BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1170, 540, 260)), \
+ BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1404, 648, 312)), \
+ BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1560, 720, 346)) \
+ BW2VBPS(_bw, 1560, 720, 346)) >> _s \
} \
}
@@ -121,28 +123,27 @@
(CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \
CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
-#define CCK_DURATION_LIST(_short) \
- CCK_ACK_DURATION(10, _short), \
- CCK_ACK_DURATION(20, _short), \
- CCK_ACK_DURATION(55, _short), \
- CCK_ACK_DURATION(110, _short)
+#define CCK_DURATION_LIST(_short, _s) \
+ CCK_ACK_DURATION(10, _short) >> _s, \
+ CCK_ACK_DURATION(20, _short) >> _s, \
+ CCK_ACK_DURATION(55, _short) >> _s, \
+ CCK_ACK_DURATION(110, _short) >> _s
-#define CCK_GROUP \
+#define CCK_GROUP(_s) \
[MINSTREL_CCK_GROUP] = { \
- .streams = 0, \
+ .streams = 1, \
.flags = 0, \
+ .shift = _s, \
.duration = { \
- CCK_DURATION_LIST(false), \
- CCK_DURATION_LIST(true) \
+ CCK_DURATION_LIST(false, _s), \
+ CCK_DURATION_LIST(true, _s) \
} \
}
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
static bool minstrel_vht_only = true;
module_param(minstrel_vht_only, bool, 0644);
MODULE_PARM_DESC(minstrel_vht_only,
"Use only VHT rates when VHT is supported by sta.");
-#endif
/*
* To enable sufficiently targeted rate sampling, MCS rates are divided into
@@ -153,49 +154,47 @@ MODULE_PARM_DESC(minstrel_vht_only,
* BW -> SGI -> #streams
*/
const struct mcs_group minstrel_mcs_groups[] = {
- MCS_GROUP(1, 0, BW_20),
- MCS_GROUP(2, 0, BW_20),
- MCS_GROUP(3, 0, BW_20),
+ MCS_GROUP(1, 0, BW_20, 5),
+ MCS_GROUP(2, 0, BW_20, 4),
+ MCS_GROUP(3, 0, BW_20, 4),
- MCS_GROUP(1, 1, BW_20),
- MCS_GROUP(2, 1, BW_20),
- MCS_GROUP(3, 1, BW_20),
+ MCS_GROUP(1, 1, BW_20, 5),
+ MCS_GROUP(2, 1, BW_20, 4),
+ MCS_GROUP(3, 1, BW_20, 4),
- MCS_GROUP(1, 0, BW_40),
- MCS_GROUP(2, 0, BW_40),
- MCS_GROUP(3, 0, BW_40),
+ MCS_GROUP(1, 0, BW_40, 4),
+ MCS_GROUP(2, 0, BW_40, 4),
+ MCS_GROUP(3, 0, BW_40, 4),
- MCS_GROUP(1, 1, BW_40),
- MCS_GROUP(2, 1, BW_40),
- MCS_GROUP(3, 1, BW_40),
+ MCS_GROUP(1, 1, BW_40, 4),
+ MCS_GROUP(2, 1, BW_40, 4),
+ MCS_GROUP(3, 1, BW_40, 4),
- CCK_GROUP,
+ CCK_GROUP(8),
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
- VHT_GROUP(1, 0, BW_20),
- VHT_GROUP(2, 0, BW_20),
- VHT_GROUP(3, 0, BW_20),
+ VHT_GROUP(1, 0, BW_20, 5),
+ VHT_GROUP(2, 0, BW_20, 4),
+ VHT_GROUP(3, 0, BW_20, 4),
- VHT_GROUP(1, 1, BW_20),
- VHT_GROUP(2, 1, BW_20),
- VHT_GROUP(3, 1, BW_20),
+ VHT_GROUP(1, 1, BW_20, 5),
+ VHT_GROUP(2, 1, BW_20, 4),
+ VHT_GROUP(3, 1, BW_20, 4),
- VHT_GROUP(1, 0, BW_40),
- VHT_GROUP(2, 0, BW_40),
- VHT_GROUP(3, 0, BW_40),
+ VHT_GROUP(1, 0, BW_40, 4),
+ VHT_GROUP(2, 0, BW_40, 4),
+ VHT_GROUP(3, 0, BW_40, 4),
- VHT_GROUP(1, 1, BW_40),
- VHT_GROUP(2, 1, BW_40),
- VHT_GROUP(3, 1, BW_40),
+ VHT_GROUP(1, 1, BW_40, 4),
+ VHT_GROUP(2, 1, BW_40, 4),
+ VHT_GROUP(3, 1, BW_40, 4),
- VHT_GROUP(1, 0, BW_80),
- VHT_GROUP(2, 0, BW_80),
- VHT_GROUP(3, 0, BW_80),
+ VHT_GROUP(1, 0, BW_80, 4),
+ VHT_GROUP(2, 0, BW_80, 4),
+ VHT_GROUP(3, 0, BW_80, 4),
- VHT_GROUP(1, 1, BW_80),
- VHT_GROUP(2, 1, BW_80),
- VHT_GROUP(3, 1, BW_80),
-#endif
+ VHT_GROUP(1, 1, BW_80, 4),
+ VHT_GROUP(2, 1, BW_80, 4),
+ VHT_GROUP(3, 1, BW_80, 4),
};
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
@@ -282,7 +281,8 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
break;
/* short preamble */
- if (!(mi->supported[group] & BIT(idx)))
+ if ((mi->supported[group] & BIT(idx + 4)) &&
+ (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
idx += 4;
}
return &mi->groups[group].rates[idx];
@@ -311,7 +311,8 @@ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
if (group != MINSTREL_CCK_GROUP)
nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
- nsecs += minstrel_mcs_groups[group].duration[rate];
+ nsecs += minstrel_mcs_groups[group].duration[rate] <<
+ minstrel_mcs_groups[group].shift;
/*
* For the throughput calculation, limit the probability value to 90% to
@@ -759,12 +760,19 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
minstrel_ht_update_rates(mp, mi);
}
+static inline int
+minstrel_get_duration(int index)
+{
+ const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+ unsigned int duration = group->duration[index % MCS_GROUP_RATES];
+ return duration << group->shift;
+}
+
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
int index)
{
struct minstrel_rate_stats *mrs;
- const struct mcs_group *group;
unsigned int tx_time, tx_time_rtscts, tx_time_data;
unsigned int cw = mp->cw_min;
unsigned int ctime = 0;
@@ -783,8 +791,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
mrs->retry_count_rtscts = 2;
mrs->retry_updated = true;
- group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
- tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;
+ tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000;
/* Contention time for first 2 tries */
ctime = (t_slot * cw) >> 1;
@@ -878,20 +885,24 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
int group = mi->max_prob_rate / MCS_GROUP_RATES;
const struct mcs_group *g = &minstrel_mcs_groups[group];
int rate = mi->max_prob_rate % MCS_GROUP_RATES;
+ unsigned int duration;
/* Disable A-MSDU if max_prob_rate is bad */
if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
return 1;
+ duration = g->duration[rate];
+ duration <<= g->shift;
+
/* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
- if (g->duration[rate] > MCS_DURATION(1, 0, 52))
+ if (duration > MCS_DURATION(1, 0, 52))
return 500;
/*
* If the rate is slower than single-stream MCS4, limit A-MSDU to usual
* data packet size
*/
- if (g->duration[rate] > MCS_DURATION(1, 0, 104))
+ if (duration > MCS_DURATION(1, 0, 104))
return 1600;
/*
@@ -899,7 +910,7 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
* rate success probability is less than 75%, limit A-MSDU to twice the usual
* data packet size
*/
- if (g->duration[rate] > MCS_DURATION(1, 0, 260) ||
+ if (duration > MCS_DURATION(1, 0, 260) ||
(minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
MINSTREL_FRAC(75, 100)))
return 3200;
@@ -946,13 +957,6 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
rate_control_set_rates(mp->hw, mi->sta, rates);
}
-static inline int
-minstrel_get_duration(int index)
-{
- const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
- return group->duration[index % MCS_GROUP_RATES];
-}
-
static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
@@ -1000,10 +1004,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
return -1;
/*
- * Do not sample if the probability is already higher than 95%
- * to avoid wasting airtime.
+ * Do not sample if the probability is already higher than 95%,
+ * or if the rate is 3 times slower than the current max probability
+ * rate, to avoid wasting airtime.
*/
- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100))
+ sample_dur = minstrel_get_duration(sample_idx);
+ if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
+ minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
return -1;
/*
@@ -1013,7 +1020,6 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 /
MCS_GROUP_RATES].streams;
- sample_dur = minstrel_get_duration(sample_idx);
if (sample_dur >= minstrel_get_duration(tp_rate2) &&
(cur_max_tp_streams - 1 <
minstrel_mcs_groups[sample_group].streams ||
@@ -1077,18 +1083,23 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
return;
sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
+ sample_idx %= MCS_GROUP_RATES;
+
+ if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] &&
+ (sample_idx >= 4) != txrc->short_preamble)
+ return;
+
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
rate->count = 1;
- if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+ if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) {
int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
rate->idx = mp->cck_rates[idx];
} else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES,
sample_group->streams);
} else {
- rate->idx = sample_idx % MCS_GROUP_RATES +
- (sample_group->streams - 1) * 8;
+ rate->idx = sample_idx + (sample_group->streams - 1) * 8;
}
rate->flags = sample_group->flags;
@@ -1130,14 +1141,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
struct minstrel_ht_sta_priv *msp = priv_sta;
struct minstrel_ht_sta *mi = &msp->ht;
struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
- u16 sta_cap = sta->ht_cap.cap;
+ u16 ht_cap = sta->ht_cap.cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct sta_info *sinfo = container_of(sta, struct sta_info, sta);
int use_vht;
int n_supported = 0;
int ack_dur;
int stbc;
int i;
+ bool ldpc;
/* fall back to the old minstrel for legacy stations */
if (!sta->ht_cap.ht_supported)
@@ -1145,12 +1156,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
if (vht_cap->vht_supported)
use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0);
else
-#endif
- use_vht = 0;
+ use_vht = 0;
msp->is_ht = true;
memset(mi, 0, sizeof(*mi));
@@ -1175,16 +1184,22 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
}
mi->sample_tries = 4;
- /* TODO tx_flags for vht - ATM the RC API is not fine-grained enough */
if (!use_vht) {
- stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
+ stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
IEEE80211_HT_CAP_RX_STBC_SHIFT;
- mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
- if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
- mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+ ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING;
+ } else {
+ stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >>
+ IEEE80211_VHT_CAP_RXSTBC_SHIFT;
+
+ ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC;
}
+ mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
+ if (ldpc)
+ mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+
for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
u32 gflags = minstrel_mcs_groups[i].flags;
int bw, nss;
@@ -1197,10 +1212,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
if (gflags & IEEE80211_TX_RC_SHORT_GI) {
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
- if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
+ if (!(ht_cap & IEEE80211_HT_CAP_SGI_40))
continue;
} else {
- if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
+ if (!(ht_cap & IEEE80211_HT_CAP_SGI_20))
continue;
}
}
@@ -1217,10 +1232,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
/* HT rate */
if (gflags & IEEE80211_TX_RC_MCS) {
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
if (use_vht && minstrel_vht_only)
continue;
-#endif
+
mi->supported[i] = mcs->rx_mask[nss - 1];
if (mi->supported[i])
n_supported++;
@@ -1258,8 +1272,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
if (!n_supported)
goto use_legacy;
- if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE))
- mi->cck_supported_short |= mi->cck_supported_short << 4;
+ mi->supported[MINSTREL_CCK_GROUP] |= mi->cck_supported_short << 4;
/* create an initial rate table with the lowest supported rates */
minstrel_ht_update_stats(mp, mi);
@@ -1340,16 +1353,88 @@ minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
kfree(msp);
}
+static void
+minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
+{
+ static const int bitrates[4] = { 10, 20, 55, 110 };
+ struct ieee80211_supported_band *sband;
+ u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
+ int i, j;
+
+ sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ struct ieee80211_rate *rate = &sband->bitrates[i];
+
+ if (rate->flags & IEEE80211_RATE_ERP_G)
+ continue;
+
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
+ if (rate->bitrate != bitrates[j])
+ continue;
+
+ mp->cck_rates[j] = i;
+ break;
+ }
+ }
+}
+
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
- return mac80211_minstrel.alloc(hw, debugfsdir);
+ struct minstrel_priv *mp;
+
+ mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
+ if (!mp)
+ return NULL;
+
+ /* contention window settings
+ * Just an approximation. Using the per-queue values would complicate
+ * the calculations and is probably unnecessary */
+ mp->cw_min = 15;
+ mp->cw_max = 1023;
+
+ /* number of packets (in %) to use for sampling other rates
+ * sample less often for non-mrr packets, because the overhead
+ * is much higher than with mrr */
+ mp->lookaround_rate = 5;
+ mp->lookaround_rate_mrr = 10;
+
+ /* maximum time that the hw is allowed to stay in one MRR segment */
+ mp->segment_size = 6000;
+
+ if (hw->max_rate_tries > 0)
+ mp->max_retry = hw->max_rate_tries;
+ else
+ /* safe default, does not necessarily have to match hw properties */
+ mp->max_retry = 7;
+
+ if (hw->max_rates >= 4)
+ mp->has_mrr = true;
+
+ mp->hw = hw;
+ mp->update_interval = 100;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ mp->fixed_rate_idx = (u32) -1;
+ debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
+ &mp->fixed_rate_idx);
+#endif
+
+ minstrel_ht_init_cck_rates(mp);
+
+ return mp;
}
static void
minstrel_ht_free(void *priv)
{
- mac80211_minstrel.free(priv);
+ kfree(priv);
}
static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
@@ -1384,7 +1469,6 @@ static const struct rate_control_ops mac80211_minstrel_ht = {
.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
- .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
#endif
.get_expected_throughput = minstrel_ht_get_expected_throughput,
};
@@ -1409,14 +1493,14 @@ static void __init init_sample_table(void)
}
int __init
-rc80211_minstrel_ht_init(void)
+rc80211_minstrel_init(void)
{
init_sample_table();
return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}
void
-rc80211_minstrel_ht_exit(void)
+rc80211_minstrel_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}
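
The mcs_group change above stores each airtime pre-shifted right by a per-group shift so it fits in a u16, widening it back on use; the table shrinks at the cost of a little precision. A tiny C sketch of the pack/round-trip cost, using an illustrative duration:

#include <stdint.h>
#include <stdio.h>

static uint16_t pack_duration(unsigned int dur, unsigned int shift)
{
	return (uint16_t)(dur >> shift);	/* must fit in 16 bits */
}

static unsigned int unpack_duration(uint16_t stored, unsigned int shift)
{
	return (unsigned int)stored << shift;
}

int main(void)
{
	unsigned int dur = 150668;	/* illustrative airtime, ns */
	unsigned int shift = 4;
	uint16_t packed = pack_duration(dur, shift);
	unsigned int back = unpack_duration(packed, shift);

	printf("orig=%u packed=%u recovered=%u (error=%u ns)\n",
	       dur, packed, back, dur - back);
	return 0;
}
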
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index de1646c42e82..26b7a3244b47 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -15,11 +15,7 @@
*/
#define MINSTREL_MAX_STREAMS 3
#define MINSTREL_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
#define MINSTREL_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
-#else
-#define MINSTREL_VHT_STREAM_GROUPS 0
-#endif
#define MINSTREL_HT_GROUPS_NB (MINSTREL_MAX_STREAMS * \
MINSTREL_HT_STREAM_GROUPS)
@@ -34,16 +30,13 @@
#define MINSTREL_CCK_GROUP (MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB)
#define MINSTREL_VHT_GROUP_0 (MINSTREL_CCK_GROUP + 1)
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
#define MCS_GROUP_RATES 10
-#else
-#define MCS_GROUP_RATES 8
-#endif
struct mcs_group {
- u32 flags;
- unsigned int streams;
- unsigned int duration[MCS_GROUP_RATES];
+ u16 flags;
+ u8 streams;
+ u8 shift;
+ u16 duration[MCS_GROUP_RATES];
};
extern const struct mcs_group minstrel_mcs_groups[];
@@ -110,17 +103,12 @@ struct minstrel_ht_sta_priv {
struct minstrel_ht_sta ht;
struct minstrel_sta_info legacy;
};
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *dbg_stats;
- struct dentry *dbg_stats_csv;
-#endif
void *ratelist;
void *sample_table;
bool is_ht;
};
void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
-void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta);
int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
int prob_ewma);
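Shrinking duration[] from unsigned int to u16 only stays accurate enough because each group now carries a shift: writers store duration >> shift and readers recover an approximation with duration << shift, exactly what the debugfs dump below does. An illustrative round-trip (pick_shift() is a sketch, not the kernel's helper):

    #include <stdio.h>
    #include <stdint.h>

    /* illustrative: smallest shift that makes the value fit in 16 bits */
    static uint8_t pick_shift(unsigned int max_duration)
    {
        uint8_t shift = 0;

        while ((max_duration >> shift) > 0xffff)
            shift++;
        return shift;
    }

    int main(void)
    {
        unsigned int duration = 1482880;          /* e.g. a frame airtime in ns */
        uint8_t shift = pick_shift(duration);
        uint16_t stored = duration >> shift;      /* what mcs_group would hold */
        unsigned int restored = (unsigned int)stored << shift;

        printf("shift=%u stored=%u restored=%u (lost %u low bits)\n",
               shift, stored, restored, duration - restored);
        return 0;
    }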
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index bfcc03152dc6..57820a5f2c16 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -15,6 +15,22 @@
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
+static ssize_t
+minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
+{
+ struct minstrel_debugfs_info *ms;
+
+ ms = file->private_data;
+ return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
+}
+
+static int
+minstrel_stats_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
static char *
minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
{
@@ -41,7 +57,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
static const int bitrates[4] = { 10, 20, 55, 110 };
int idx = i * MCS_GROUP_RATES + j;
- unsigned int prob_ewmsd;
+ unsigned int duration;
if (!(mi->supported[i] & BIT(j)))
continue;
@@ -79,21 +95,21 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
p += sprintf(p, " %3u ", idx);
/* tx_time[rate(i)] in usec */
- tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000);
+ duration = mg->duration[j];
+ duration <<= mg->shift;
+ tx_time = DIV_ROUND_CLOSEST(duration, 1000);
p += sprintf(p, "%6u ", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
" %3u %3u %-3u "
"%9llu %-9llu\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -130,9 +146,9 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
p += sprintf(p, "\n");
p += sprintf(p,
- " best ____________rate__________ ________statistics________ _____last____ ______sum-of________\n");
+ " best ____________rate__________ ____statistics___ _____last____ ______sum-of________\n");
p += sprintf(p,
- "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n");
+ "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n");
p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
for (i = 0; i < MINSTREL_CCK_GROUP; i++)
@@ -187,7 +203,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
static const int bitrates[4] = { 10, 20, 55, 110 };
int idx = i * MCS_GROUP_RATES + j;
- unsigned int prob_ewmsd;
+ unsigned int duration;
if (!(mi->supported[i] & BIT(j)))
continue;
@@ -222,20 +238,21 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
}
p += sprintf(p, "%u,", idx);
- tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000);
+
+ duration = mg->duration[j];
+ duration <<= mg->shift;
+ tx_time = DIV_ROUND_CLOSEST(duration, 1000);
p += sprintf(p, "%u,", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,"
"%u,%llu,%llu,",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -303,17 +320,8 @@ minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
struct minstrel_ht_sta_priv *msp = priv_sta;
- msp->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, msp,
- &minstrel_ht_stat_fops);
- msp->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, msp,
- &minstrel_ht_stat_csv_fops);
-}
-
-void
-minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct minstrel_ht_sta_priv *msp = priv_sta;
-
- debugfs_remove(msp->dbg_stats);
- debugfs_remove(msp->dbg_stats_csv);
+ debugfs_create_file("rc_stats", 0444, dir, msp,
+ &minstrel_ht_stat_fops);
+ debugfs_create_file("rc_stats_csv", 0444, dir, msp,
+ &minstrel_ht_stat_csv_fops);
}
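Dropping the stored dentries and minstrel_ht_remove_sta_debugfs() relies on debugfs cleaning up recursively: the rc_stats files live and die with the per-station directory that mac80211 itself removes. A minimal sketch of that ownership model (names and fops here are placeholders):

    #include <linux/debugfs.h>

    static const struct file_operations sketch_fops; /* placeholder fops */
    static struct dentry *sketch_dir;

    static void sketch_add(void *priv)
    {
        sketch_dir = debugfs_create_dir("example_sta", NULL);
        /* no need to keep the file's dentry; it is owned by sketch_dir */
        debugfs_create_file("rc_stats", 0444, sketch_dir, priv, &sketch_fops);
    }

    static void sketch_remove(void)
    {
        /* removes rc_stats too - per-file debugfs_remove() calls are redundant */
        debugfs_remove_recursive(sketch_dir);
    }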
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a0ca27aeb732..3bd3b5769797 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2458,8 +2458,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if (!xmit_skb)
net_info_ratelimited("%s: failed to clone multicast frame\n",
dev->name);
- } else if (!is_multicast_ether_addr(ehdr->h_dest)) {
- dsta = sta_info_get(sdata, skb->data);
+ } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
+ !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
+ dsta = sta_info_get(sdata, ehdr->h_dest);
if (dsta) {
/*
* The destination station is associated to
@@ -4240,11 +4241,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
if (fast_rx->internal_forward) {
struct sk_buff *xmit_skb = NULL;
- bool multicast = is_multicast_ether_addr(skb->data);
-
- if (multicast) {
+ if (is_multicast_ether_addr(addrs.da)) {
xmit_skb = skb_copy(skb, GFP_ATOMIC);
- } else if (sta_info_get(rx->sdata, skb->data)) {
+ } else if (!ether_addr_equal(addrs.da, addrs.sa) &&
+ sta_info_get(rx->sdata, addrs.da)) {
xmit_skb = skb;
skb = NULL;
}
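Both rx.c hunks add the same guard: a unicast frame is only bridged back into the BSS when the destination really is another station, never when h_dest equals h_source, which would reflect the frame straight back at its sender. The predicate in isolation, as a sketch over the etherdevice.h helpers the patch already uses (the station lookup is left out):

    #include <linux/types.h>
    #include <linux/etherdevice.h>

    /* may this frame be relayed to a peer within the same BSS? */
    static bool sketch_may_forward(const u8 *da, const u8 *sa)
    {
        if (is_multicast_ether_addr(da))
            return true;            /* copied to the wire, local copy kept */
        if (ether_addr_equal(da, sa))
            return false;           /* never bounce a frame to its source */
        return true;                /* unicast: forward iff da is a known sta */
    }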
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 91d7c0cd1882..aa4afbf0abaf 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -987,6 +987,25 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_tx_status_ext);
+void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[info->band];
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_tx_status status = {
+ .info = info,
+ .sta = pubsta,
+ };
+
+ rate_control_tx_status(local, sband, &status);
+
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
+ sta->tx_stats.last_rate = info->status.rates[0];
+}
+EXPORT_SYMBOL(ieee80211_tx_rate_update);
+
void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
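ieee80211_tx_rate_update() gives drivers a way to feed a station's new TX rate into rate control without synthesizing a full frame-status report. A hedged driver-side sketch; the band choice and where the rate comes from are assumptions about the calling driver:

    #include <net/mac80211.h>

    /* sketch: push a firmware-reported rate change into rate control */
    static void drv_report_rate(struct ieee80211_hw *hw,
                                struct ieee80211_sta *sta,
                                const struct ieee80211_tx_rate *rate)
    {
        struct ieee80211_tx_info info = {};

        info.band = NL80211_BAND_2GHZ;     /* assumed: driver's current band */
        info.status.rates[0] = *rate;
        info.status.rates[1].idx = -1;     /* terminate the rate table */

        ieee80211_tx_rate_update(hw, sta, &info);
    }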
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 0ab69a1964f8..588c51a67c89 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2600,6 +2600,29 @@ TRACE_EVENT(drv_wake_tx_queue,
)
);
+TRACE_EVENT(drv_get_ftm_responder_stats,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats),
+
+ TP_ARGS(local, sdata, ftm_stats),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG
+ )
+);
+
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 36a3c2ada515..bec424316ea4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -264,6 +264,9 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
struct ieee80211_txq *txq = sta->sta.txq[i];
+ if (!txq)
+ continue;
+
txqi = to_txq_info(txq);
if (ac != txq->ac)
@@ -2175,6 +2178,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
case NL80211_IFTYPE_AP:
changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS;
+ if (sdata->vif.bss_conf.ftm_responder == 1 &&
+ wiphy_ext_feature_isset(sdata->local->hw.wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER))
+ changed |= BSS_CHANGED_FTM_RESPONDER;
+
if (sdata->vif.type == NL80211_IFTYPE_AP) {
changed |= BSS_CHANGED_AP_PROBE_RESP;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7f891ffffc05..5fe274c47c41 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2031,6 +2031,40 @@ nla_put_failure:
return -EMSGSIZE;
}
+#if IS_ENABLED(CONFIG_INET)
+static int mpls_valid_fib_dump_req(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ return ip_valid_fib_dump_req(nlh, extack);
+}
+#else
+static int mpls_valid_fib_dump_req(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for FIB dump request");
+ return -EINVAL;
+ }
+
+ rtm = nlmsg_data(nlh);
+ if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
+ rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
+ rtm->rtm_type || rtm->rtm_flags) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for FIB dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*rtm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in FIB dump request");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
@@ -2042,7 +2076,7 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
ASSERT_RTNL();
if (cb->strict_check) {
- int err = ip_valid_fib_dump_req(nlh, cb->extack);
+ int err = mpls_valid_fib_dump_req(nlh, cb->extack);
if (err < 0)
return err;
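Concretely, a dump request that passes either validator carries exactly a zeroed struct rtmsg (bar rtm_family) and no attributes. A hedged raw-netlink sketch of such a request from userspace:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    int main(void)
    {
        struct {
            struct nlmsghdr nlh;
            struct rtmsg rtm;              /* all-zero fields under strict check */
        } req;
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = sizeof(req);   /* no data after the header */
        req.nlh.nlmsg_type = RTM_GETROUTE;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req.rtm.rtm_family = AF_MPLS;      /* select the MPLS FIB */

        sendto(fd, &req, sizeof(req), 0, (struct sockaddr *)&sa, sizeof(sa));
        close(fd);
        return 0;
    }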
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 3d0a33b874f5..13c9b5eeb3b7 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -175,6 +175,8 @@ struct ncsi_package;
#define NCSI_RESERVED_CHANNEL 0x1f
#define NCSI_CHANNEL_INDEX(c) ((c) & ((1 << NCSI_PACKAGE_SHIFT) - 1))
#define NCSI_TO_CHANNEL(p, c) (((p) << NCSI_PACKAGE_SHIFT) | (c))
+#define NCSI_MAX_PACKAGE 8
+#define NCSI_MAX_CHANNEL 32
struct ncsi_channel {
unsigned char id;
@@ -220,11 +222,15 @@ struct ncsi_request {
bool used; /* Request that has been assigned */
unsigned int flags; /* NCSI request property */
#define NCSI_REQ_FLAG_EVENT_DRIVEN 1
+#define NCSI_REQ_FLAG_NETLINK_DRIVEN 2
struct ncsi_dev_priv *ndp; /* Associated NCSI device */
struct sk_buff *cmd; /* Associated NCSI command packet */
struct sk_buff *rsp; /* Associated NCSI response packet */
struct timer_list timer; /* Timer waiting for the response */
bool enabled; /* Whether the timer has been enabled */
+ u32 snd_seq; /* netlink sending sequence number */
+ u32 snd_portid; /* netlink portid of sender */
+ struct nlmsghdr nlhdr; /* netlink message header */
};
enum {
@@ -310,6 +316,7 @@ struct ncsi_cmd_arg {
unsigned int dwords[4];
};
unsigned char *data; /* NCSI OEM data */
+ struct genl_info *info; /* Netlink information */
};
extern struct list_head ncsi_dev_list;
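The packing macros above combine a package ID and a channel index into a single channel identifier; with NCSI_MAX_CHANNEL at 32, NCSI_PACKAGE_SHIFT works out to 5 bits for the channel part (the define itself sits outside this hunk, so the value is inferred). A standalone round-trip:

    #include <stdio.h>

    #define PACKAGE_SHIFT    5                  /* inferred from NCSI_MAX_CHANNEL */
    #define TO_CHANNEL(p, c) (((p) << PACKAGE_SHIFT) | (c))
    #define CHANNEL_INDEX(c) ((c) & ((1 << PACKAGE_SHIFT) - 1))
    #define PACKAGE_INDEX(c) ((c) >> PACKAGE_SHIFT)

    int main(void)
    {
        unsigned int id = TO_CHANNEL(3, 17);    /* package 3, channel 17 */

        printf("id=0x%x package=%u channel=%u\n",
               id, PACKAGE_INDEX(id), CHANNEL_INDEX(id));
        return 0;
    }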
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 82b7d9201db8..356af474e43c 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -17,6 +17,7 @@
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
@@ -346,6 +347,13 @@ int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
if (!nr)
return -ENOMEM;
+ /* track netlink information */
+ if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ nr->snd_seq = nca->info->snd_seq;
+ nr->snd_portid = nca->info->snd_portid;
+ nr->nlhdr = *nca->info->nlhdr;
+ }
+
/* Prepare the packet */
nca->id = nr->id;
ret = nch->handler(nr->cmd, nca);
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 091284760d21..6aa0614d2d28 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -19,6 +19,7 @@
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
+#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
@@ -406,6 +407,9 @@ static void ncsi_request_timeout(struct timer_list *t)
{
struct ncsi_request *nr = from_timer(nr, t, timer);
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct ncsi_cmd_pkt *cmd;
+ struct ncsi_package *np;
+ struct ncsi_channel *nc;
unsigned long flags;
/* If the request already had associated response,
@@ -419,6 +423,18 @@ static void ncsi_request_timeout(struct timer_list *t)
}
spin_unlock_irqrestore(&ndp->lock, flags);
+ if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ if (nr->cmd) {
+ /* Find the package */
+ cmd = (struct ncsi_cmd_pkt *)
+ skb_network_header(nr->cmd);
+ ncsi_find_package_and_channel(ndp,
+ cmd->cmd.common.channel,
+ &np, &nc);
+ ncsi_send_netlink_timeout(nr, np, nc);
+ }
+ }
+
/* Release the request */
ncsi_free_request(nr);
}
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 32cb7751d216..33314381b4f5 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -19,6 +19,7 @@
#include <uapi/linux/ncsi.h>
#include "internal.h"
+#include "ncsi-pkt.h"
#include "ncsi-netlink.h"
static struct genl_family ncsi_genl_family;
@@ -28,6 +29,7 @@ static const struct nla_policy ncsi_genl_policy[NCSI_ATTR_MAX + 1] = {
[NCSI_ATTR_PACKAGE_LIST] = { .type = NLA_NESTED },
[NCSI_ATTR_PACKAGE_ID] = { .type = NLA_U32 },
[NCSI_ATTR_CHANNEL_ID] = { .type = NLA_U32 },
+ [NCSI_ATTR_DATA] = { .type = NLA_BINARY, .len = 2048 },
};
static struct ncsi_dev_priv *ndp_from_ifindex(struct net *net, u32 ifindex)
@@ -365,6 +367,202 @@ static int ncsi_clear_interface_nl(struct sk_buff *msg, struct genl_info *info)
return 0;
}
+static int ncsi_send_cmd_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct ncsi_dev_priv *ndp;
+ struct ncsi_pkt_hdr *hdr;
+ struct ncsi_cmd_arg nca;
+ unsigned char *data;
+ u32 package_id;
+ u32 channel_id;
+ int len, ret;
+
+ if (!info || !info->attrs) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_IFINDEX]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_PACKAGE_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_CHANNEL_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
+ nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
+ if (!ndp) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
+ channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
+
+ if (package_id >= NCSI_MAX_PACKAGE || channel_id >= NCSI_MAX_CHANNEL) {
+ ret = -ERANGE;
+ goto out_netlink;
+ }
+
+ len = nla_len(info->attrs[NCSI_ATTR_DATA]);
+ if (len < sizeof(struct ncsi_pkt_hdr)) {
+ netdev_info(ndp->ndev.dev, "NCSI: no command to send %u\n",
+ package_id);
+ ret = -EINVAL;
+ goto out_netlink;
+ } else {
+ data = (unsigned char *)nla_data(info->attrs[NCSI_ATTR_DATA]);
+ }
+
+ hdr = (struct ncsi_pkt_hdr *)data;
+
+ nca.ndp = ndp;
+ nca.package = (unsigned char)package_id;
+ nca.channel = (unsigned char)channel_id;
+ nca.type = hdr->type;
+ nca.req_flags = NCSI_REQ_FLAG_NETLINK_DRIVEN;
+ nca.info = info;
+ nca.payload = ntohs(hdr->length);
+ nca.data = data + sizeof(*hdr);
+
+ ret = ncsi_xmit_cmd(&nca);
+out_netlink:
+ if (ret != 0) {
+ netdev_err(ndp->ndev.dev,
+ "NCSI: Error %d sending command\n",
+ ret);
+ ncsi_send_netlink_err(ndp->ndev.dev,
+ info->snd_seq,
+ info->snd_portid,
+ info->nlhdr,
+ ret);
+ }
+out:
+ return ret;
+}
+
+int ncsi_send_netlink_rsp(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc)
+{
+ struct sk_buff *skb;
+ struct net *net;
+ void *hdr;
+ int rc;
+
+ net = dev_net(nr->rsp->dev);
+
+ skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
+ &ncsi_genl_family, 0, NCSI_CMD_SEND_CMD);
+ if (!hdr) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->rsp->dev->ifindex);
+ if (np)
+ nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
+ if (nc)
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
+ else
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
+
+ rc = nla_put(skb, NCSI_ATTR_DATA, nr->rsp->len, (void *)nr->rsp->data);
+ if (rc)
+ goto err;
+
+ genlmsg_end(skb, hdr);
+ return genlmsg_unicast(net, skb, nr->snd_portid);
+
+err:
+ kfree_skb(skb);
+ return rc;
+}
+
+int ncsi_send_netlink_timeout(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc)
+{
+ struct sk_buff *skb;
+ struct net *net;
+ void *hdr;
+
+ skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
+ &ncsi_genl_family, 0, NCSI_CMD_SEND_CMD);
+ if (!hdr) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ net = dev_net(nr->cmd->dev);
+
+ nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->cmd->dev->ifindex);
+
+ if (np)
+ nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
+ else
+ nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID,
+ NCSI_PACKAGE_INDEX((((struct ncsi_pkt_hdr *)
+ nr->cmd->data)->channel)));
+
+ if (nc)
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
+ else
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
+
+ genlmsg_end(skb, hdr);
+ return genlmsg_unicast(net, skb, nr->snd_portid);
+}
+
+int ncsi_send_netlink_err(struct net_device *dev,
+ u32 snd_seq,
+ u32 snd_portid,
+ struct nlmsghdr *nlhdr,
+ int err)
+{
+ struct nlmsghdr *nlh;
+ struct nlmsgerr *nle;
+ struct sk_buff *skb;
+ struct net *net;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ net = dev_net(dev);
+
+ nlh = nlmsg_put(skb, snd_portid, snd_seq,
+ NLMSG_ERROR, sizeof(*nle), 0);
+ nle = (struct nlmsgerr *)nlmsg_data(nlh);
+ nle->error = err;
+ memcpy(&nle->msg, nlhdr, sizeof(*nlh));
+
+ nlmsg_end(skb, nlh);
+
+ return nlmsg_unicast(net->genl_sock, skb, snd_portid);
+}
+
static const struct genl_ops ncsi_ops[] = {
{
.cmd = NCSI_CMD_PKG_INFO,
@@ -385,6 +583,12 @@ static const struct genl_ops ncsi_ops[] = {
.doit = ncsi_clear_interface_nl,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = NCSI_CMD_SEND_CMD,
+ .policy = ncsi_genl_policy,
+ .doit = ncsi_send_cmd_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family ncsi_genl_family __ro_after_init = {
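From userspace, NCSI_CMD_SEND_CMD takes an ifindex, a package and channel ID, and a raw NC-SI packet (ncsi_pkt_hdr plus payload) in NCSI_ATTR_DATA. A hedged libnl-genl-3 sketch; the "NCSI" family name is assumed from the uapi header and error handling is elided:

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/ncsi.h>

    static int ncsi_send_raw(int ifindex, unsigned int pkg, unsigned int chan,
                             const void *pkt, int len)
    {
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg = nlmsg_alloc();
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, "NCSI");        /* assumed family name */

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NCSI_CMD_SEND_CMD, 0);
        nla_put_u32(msg, NCSI_ATTR_IFINDEX, ifindex);
        nla_put_u32(msg, NCSI_ATTR_PACKAGE_ID, pkg);   /* < NCSI_MAX_PACKAGE */
        nla_put_u32(msg, NCSI_ATTR_CHANNEL_ID, chan);  /* < NCSI_MAX_CHANNEL */
        nla_put(msg, NCSI_ATTR_DATA, len, pkt);        /* header + payload */

        nl_send_auto(sk, msg);
        nlmsg_free(msg);
        nl_socket_free(sk);
        return 0;
    }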
diff --git a/net/ncsi/ncsi-netlink.h b/net/ncsi/ncsi-netlink.h
index 91a5c256f8c4..c4a46887a932 100644
--- a/net/ncsi/ncsi-netlink.h
+++ b/net/ncsi/ncsi-netlink.h
@@ -14,6 +14,18 @@
#include "internal.h"
+int ncsi_send_netlink_rsp(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc);
+int ncsi_send_netlink_timeout(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc);
+int ncsi_send_netlink_err(struct net_device *dev,
+ u32 snd_seq,
+ u32 snd_portid,
+ struct nlmsghdr *nlhdr,
+ int err);
+
int ncsi_init_netlink(struct net_device *dev);
int ncsi_unregister_netlink(struct net_device *dev);
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index d66b34749027..85fa59afae34 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -16,9 +16,11 @@
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
+#include "ncsi-netlink.h"
static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
unsigned short payload)
@@ -32,15 +34,25 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
* before calling this function.
*/
h = (struct ncsi_rsp_pkt_hdr *)skb_network_header(nr->rsp);
- if (h->common.revision != NCSI_PKT_REVISION)
+
+ if (h->common.revision != NCSI_PKT_REVISION) {
+ netdev_dbg(nr->ndp->ndev.dev,
+ "NCSI: unsupported header revision\n");
return -EINVAL;
- if (ntohs(h->common.length) != payload)
+ }
+ if (ntohs(h->common.length) != payload) {
+ netdev_dbg(nr->ndp->ndev.dev,
+ "NCSI: payload length mismatched\n");
return -EINVAL;
+ }
/* Check on code and reason */
if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
- ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR)
- return -EINVAL;
+ ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
+ netdev_dbg(nr->ndp->ndev.dev,
+ "NCSI: non zero response/reason code\n");
+ return -EPERM;
+ }
/* Validate checksum, which might be zeroes if the
* sender doesn't support checksum according to NCSI
@@ -52,8 +64,11 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
checksum = ncsi_calculate_checksum((unsigned char *)h,
sizeof(*h) + payload - 4);
- if (*pchecksum != htonl(checksum))
+
+ if (*pchecksum != htonl(checksum)) {
+ netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatch\n");
return -EINVAL;
+ }
return 0;
}
@@ -941,6 +956,26 @@ static int ncsi_rsp_handler_gpuuid(struct ncsi_request *nr)
return 0;
}
+static int ncsi_rsp_handler_netlink(struct ncsi_request *nr)
+{
+ struct ncsi_dev_priv *ndp = nr->ndp;
+ struct ncsi_rsp_pkt *rsp;
+ struct ncsi_package *np;
+ struct ncsi_channel *nc;
+ int ret;
+
+ /* Find the package */
+ rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
+ ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
+ &np, &nc);
+ if (!np)
+ return -ENODEV;
+
+ ret = ncsi_send_netlink_rsp(nr, np, nc);
+
+ return ret;
+}
+
static struct ncsi_rsp_handler {
unsigned char type;
int payload;
@@ -1043,6 +1078,17 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
netdev_warn(ndp->ndev.dev,
"NCSI: 'bad' packet ignored for type 0x%x\n",
hdr->type);
+
+ if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ if (ret == -EPERM)
+ goto out_netlink;
+ else
+ ncsi_send_netlink_err(ndp->ndev.dev,
+ nr->snd_seq,
+ nr->snd_portid,
+ &nr->nlhdr,
+ ret);
+ }
goto out;
}
@@ -1052,6 +1098,17 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
netdev_err(ndp->ndev.dev,
"NCSI: Handler for packet type 0x%x returned %d\n",
hdr->type, ret);
+
+out_netlink:
+ if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ ret = ncsi_rsp_handler_netlink(nr);
+ if (ret) {
+ netdev_err(ndp->ndev.dev,
+ "NCSI: Netlink handler for packet type 0x%x returned %d\n",
+ hdr->type, ret);
+ }
+ }
+
out:
ncsi_free_request(nr);
return ret;
diff --git a/net/rds/send.c b/net/rds/send.c
index 57b3d5a8b2db..fe785ee819dd 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1007,7 +1007,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
return ret;
}
-static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
+static int rds_send_mprds_hash(struct rds_sock *rs,
+ struct rds_connection *conn, int nonblock)
{
int hash;
@@ -1023,10 +1024,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
* used. But if we are interrupted, we have to use the zero
* c_path in case the connection ends up being non-MP capable.
*/
- if (conn->c_npaths == 0)
+ if (conn->c_npaths == 0) {
+ /* Cannot wait for the connection to be made, so just use
+ * the base c_path.
+ */
+ if (nonblock)
+ return 0;
if (wait_event_interruptible(conn->c_hs_waitq,
conn->c_npaths != 0))
hash = 0;
+ }
if (conn->c_npaths == 1)
hash = 0;
}
@@ -1256,7 +1263,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
}
if (conn->c_trans->t_mp_capable)
- cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+ cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
else
cpath = &conn->c_path[0];
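The nonblock flag threaded into rds_send_mprds_hash() comes from MSG_DONTWAIT or an O_NONBLOCK socket, so a nonblocking sender no longer sleeps on the MP handshake and simply transmits on path 0. A userspace sketch of such a sender (the destination setup is left to the caller):

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* send on an RDS socket without ever blocking on path negotiation */
    static ssize_t rds_send_nonblock(int fd, const struct sockaddr *dst,
                                     socklen_t dstlen, const void *buf,
                                     size_t len)
    {
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
            .msg_name = (void *)dst,
            .msg_namelen = dstlen,
            .msg_iov = &iov,
            .msg_iovlen = 1,
        };

        /* MSG_DONTWAIT makes rds_sendmsg() pick c_path[0] if npaths == 0 */
        return sendmsg(fd, &msg, MSG_DONTWAIT);
    }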
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 76569c178915..0a7c49e8e053 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -302,6 +302,7 @@ struct rxrpc_peer {
/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
+ spinlock_t rtt_input_lock; /* RTT lock for input routine */
ktime_t rtt_last_req; /* Time of last RTT request */
u64 rtt; /* Current RTT estimate (in nS) */
u64 rtt_sum; /* Sum of cache contents */
@@ -442,17 +443,17 @@ struct rxrpc_connection {
spinlock_t state_lock; /* state-change lock */
enum rxrpc_conn_cache_state cache_state;
enum rxrpc_conn_proto_state state; /* current state of connection */
- u32 local_abort; /* local abort code */
- u32 remote_abort; /* remote abort code */
+ u32 abort_code; /* Abort code of connection abort */
int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
u32 security_nonce; /* response re-use preventer */
- u16 service_id; /* Service ID, possibly upgraded */
+ u32 service_id; /* Service ID, possibly upgraded */
u8 size_align; /* data size alignment (for security) */
u8 security_size; /* security header size */
u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+ short error; /* Local error code */
};
static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
@@ -635,6 +636,8 @@ struct rxrpc_call {
bool tx_phase; /* T if transmission phase, F if receive phase */
u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
+ spinlock_t input_lock; /* Lock for packet input to this call */
+
/* receive-phase ACK management */
u8 ackr_reason; /* reason to ACK */
u16 ackr_skew; /* skew on packet being ACK'd */
@@ -720,8 +723,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
struct rxrpc_sock *,
- struct rxrpc_peer *,
- struct rxrpc_connection *,
struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
@@ -891,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
-int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
- struct sockaddr_rxrpc *, gfp_t);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
+ struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
+ gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
@@ -965,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
* input.c
*/
-void rxrpc_data_ready(struct sock *);
+int rxrpc_input_packet(struct sock *, struct sk_buff *);
/*
* insecure.c
@@ -1045,10 +1047,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
*/
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
+ struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
@@ -1059,6 +1062,7 @@ void rxrpc_put_peer(struct rxrpc_peer *);
*/
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
+extern const struct seq_operations rxrpc_peer_seq_ops;
/*
* recvmsg.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 8354cadbb839..e0d8ca03169a 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -287,7 +287,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
(peer_tail + 1) &
(RXRPC_BACKLOG_MAX - 1));
- rxrpc_new_incoming_peer(local, peer);
+ rxrpc_new_incoming_peer(rx, local, peer);
}
/* Now allocate and set up the connection */
@@ -333,11 +333,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
*/
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
struct rxrpc_sock *rx,
- struct rxrpc_peer *peer,
- struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_connection *conn;
+ struct rxrpc_peer *peer;
struct rxrpc_call *call;
_enter("");
@@ -354,6 +354,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
goto out;
}
+ /* The peer, connection and call may all have sprung into existence due
+ * to a duplicate packet being handled on another CPU in parallel, so
+ * we have to recheck the routing. However, we're now holding
+ * rx->incoming_lock, so the values should remain stable.
+ */
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
+
call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
if (!call) {
skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
@@ -396,20 +403,22 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
case RXRPC_CONN_SERVICE:
write_lock(&call->state_lock);
- if (rx->discard_new_call)
- call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
- else
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ if (rx->discard_new_call)
+ call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+ else
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ }
write_unlock(&call->state_lock);
break;
case RXRPC_CONN_REMOTELY_ABORTED:
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
- conn->remote_abort, -ECONNABORTED);
+ conn->abort_code, conn->error);
break;
case RXRPC_CONN_LOCALLY_ABORTED:
rxrpc_abort_call("CON", call, sp->hdr.seq,
- conn->local_abort, -ECONNABORTED);
+ conn->abort_code, conn->error);
break;
default:
BUG();
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 799f75b6900d..8f1a8f85b1f9 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock);
spin_lock_init(&call->notify_lock);
+ spin_lock_init(&call->input_lock);
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
call->debug_id = debug_id;
@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
- ret = rxrpc_connect_call(call, cp, srx, gfp);
+ ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
- ret = rxrpc_connect_call(call, cp, srx, gfp);
+ ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 8acf74fe24c0..521189f4b666 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -276,7 +276,8 @@ dont_reuse:
* If we return with a connection, the call will be on its waiting list. It's
* left to the caller to assign a channel and wake up the call.
*/
-static int rxrpc_get_client_conn(struct rxrpc_call *call,
+static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
+ cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
if (!cp->peer)
goto error;
@@ -683,7 +684,8 @@ out:
* find a connection for a call
* - called in process context with IRQs enabled
*/
-int rxrpc_connect_call(struct rxrpc_call *call,
+int rxrpc_connect_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
rxrpc_cull_active_client_conns(rxnet);
- ret = rxrpc_get_client_conn(call, cp, srx, gfp);
+ ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
if (ret < 0)
goto out;
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 6df56ce68861..b6fca8ebb117 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
switch (chan->last_type) {
case RXRPC_PACKET_TYPE_ABORT:
- _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
+ _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
break;
case RXRPC_PACKET_TYPE_ACK:
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
* pass a connection-level abort onto all calls on that connection
*/
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
- enum rxrpc_call_completion compl,
- u32 abort_code, int error)
+ enum rxrpc_call_completion compl)
{
struct rxrpc_call *call;
int i;
- _enter("{%d},%x", conn->debug_id, abort_code);
+ _enter("{%d},%x", conn->debug_id, conn->abort_code);
spin_lock(&conn->channel_lock);
@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
trace_rxrpc_abort(call->debug_id,
"CON", call->cid,
call->call_id, 0,
- abort_code, error);
+ conn->abort_code,
+ conn->error);
if (rxrpc_set_call_completion(call, compl,
- abort_code, error))
+ conn->abort_code,
+ conn->error))
rxrpc_notify_socket(call);
}
}
@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
return 0;
}
+ conn->error = error;
+ conn->abort_code = abort_code;
conn->state = RXRPC_CONN_LOCALLY_ABORTED;
spin_unlock_bh(&conn->state_lock);
- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
+ rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
msg.msg_name = &conn->params.peer->srx.transport;
msg.msg_namelen = conn->params.peer->srx.transport_len;
@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
whdr._rsvd = 0;
whdr.serviceId = htons(conn->service_id);
- word = htonl(conn->local_abort);
+ word = htonl(conn->abort_code);
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
serial = atomic_inc_return(&conn->serial);
whdr.serial = htonl(serial);
- _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
+ _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
if (ret < 0) {
@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
+ conn->error = -ECONNABORTED;
+ conn->abort_code = abort_code;
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
- abort_code, -ECONNABORTED);
+ rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5b2626929822..9128aa0e40aa 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
/*
* Apply a hard ACK by advancing the Tx window.
*/
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary)
{
struct sk_buff *skb, *list = NULL;
+ bool rot_last = false;
int ix;
u8 annotation;
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
skb->next = list;
list = skb;
- if (annotation & RXRPC_TX_ANNO_LAST)
+ if (annotation & RXRPC_TX_ANNO_LAST) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+ rot_last = true;
+ }
if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
summary->nr_rot_new_acks++;
}
spin_unlock(&call->lock);
- trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+ trace_rxrpc_transmit(call, (rot_last ?
rxrpc_transmit_rotate_last :
rxrpc_transmit_rotate));
wake_up(&call->waitq);
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
skb_mark_not_on_list(skb);
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
}
+
+ return rot_last;
}
/*
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
const char *abort_why)
{
+ unsigned int state;
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
write_lock(&call->state_lock);
- switch (call->state) {
+ state = call->state;
+ switch (state) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
if (reply_begun)
- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+ call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
else
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
break;
case RXRPC_CALL_SERVER_AWAIT_ACK:
__rxrpc_call_completed(call);
rxrpc_notify_socket(call);
+ state = call->state;
break;
default:
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
}
write_unlock(&call->state_lock);
- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
+ if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
- } else {
+ else
trace_rxrpc_transmit(call, rxrpc_transmit_end);
- }
_leave(" = ok");
return true;
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
- if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
- rxrpc_rotate_tx_window(call, top, &summary);
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_proto_abort("TXL", call, top);
- return false;
+ if (!rxrpc_rotate_tx_window(call, top, &summary)) {
+ rxrpc_proto_abort("TXL", call, top);
+ return false;
+ }
}
if (!rxrpc_end_tx_phase(call, true, "ETD"))
return false;
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
}
}
+ spin_lock(&call->input_lock);
+
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
!rxrpc_receiving_reply(call))
- return;
+ goto unlock;
call->ackr_prev_seq = seq;
@@ -488,12 +497,16 @@ next_subpacket:
if (flags & RXRPC_LAST_PACKET) {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- seq != call->rx_top)
- return rxrpc_proto_abort("LSN", call, seq);
+ seq != call->rx_top) {
+ rxrpc_proto_abort("LSN", call, seq);
+ goto unlock;
+ }
} else {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- after_eq(seq, call->rx_top))
- return rxrpc_proto_abort("LSA", call, seq);
+ after_eq(seq, call->rx_top)) {
+ rxrpc_proto_abort("LSA", call, seq);
+ goto unlock;
+ }
}
trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
@@ -560,8 +573,10 @@ next_subpacket:
skip:
offset += len;
if (flags & RXRPC_JUMBO_PACKET) {
- if (skb_copy_bits(skb, offset, &flags, 1) < 0)
- return rxrpc_proto_abort("XJF", call, seq);
+ if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
+ rxrpc_proto_abort("XJF", call, seq);
+ goto unlock;
+ }
offset += sizeof(struct rxrpc_jumbo_header);
seq++;
serial++;
@@ -601,6 +616,9 @@ ack:
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
}
+
+unlock:
+ spin_unlock(&call->input_lock);
_leave(" [queued]");
}
@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
ping_time = call->ping_time;
smp_rmb();
- ping_serial = call->ping_serial;
+ ping_serial = READ_ONCE(call->ping_serial);
if (orig_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
- if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
- before(orig_serial, ping_serial))
+ if (before(orig_serial, ping_serial) ||
+ !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
return;
- clear_bit(RXRPC_CALL_PINGING, &call->flags);
if (after(orig_serial, ping_serial))
return;
@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_propose_ack_respond_to_ack);
}
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest))
+ return;
+
+ buf.info.rxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(buf.info)) {
- if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
- return rxrpc_proto_abort("XAI", call, 0);
+ if (skb->len >= ioffset + sizeof(buf.info) &&
+ skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
+ return rxrpc_proto_abort("XAI", call, 0);
+
+ spin_lock(&call->input_lock);
+
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest))
+ goto out;
+ call->acks_latest_ts = skb->tstamp;
+ call->acks_latest = sp->hdr.serial;
+
+ /* Parse rwind and mtu sizes if provided. */
+ if (buf.info.rxMTU)
rxrpc_input_ackinfo(call, skb, &buf.info);
- }
- if (first_soft_ack == 0)
- return rxrpc_proto_abort("AK0", call, 0);
+ if (first_soft_ack == 0) {
+ rxrpc_proto_abort("AK0", call, 0);
+ goto out;
+ }
/* Ignore ACKs unless we are or have just been transmitting. */
switch (READ_ONCE(call->state)) {
@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
case RXRPC_CALL_SERVER_AWAIT_ACK:
break;
default:
- return;
- }
-
- /* Discard any out-of-order or duplicate ACKs. */
- if (before_eq(sp->hdr.serial, call->acks_latest)) {
- _debug("discard ACK %d <= %d",
- sp->hdr.serial, call->acks_latest);
- return;
+ goto out;
}
- call->acks_latest_ts = skb->tstamp;
- call->acks_latest = sp->hdr.serial;
if (before(hard_ack, call->tx_hard_ack) ||
- after(hard_ack, call->tx_top))
- return rxrpc_proto_abort("AKW", call, 0);
- if (nr_acks > call->tx_top - hard_ack)
- return rxrpc_proto_abort("AKN", call, 0);
+ after(hard_ack, call->tx_top)) {
+ rxrpc_proto_abort("AKW", call, 0);
+ goto out;
+ }
+ if (nr_acks > call->tx_top - hard_ack) {
+ rxrpc_proto_abort("AKN", call, 0);
+ goto out;
+ }
- if (after(hard_ack, call->tx_hard_ack))
- rxrpc_rotate_tx_window(call, hard_ack, &summary);
+ if (after(hard_ack, call->tx_hard_ack)) {
+ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+ rxrpc_end_tx_phase(call, false, "ETA");
+ goto out;
+ }
+ }
if (nr_acks > 0) {
- if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
- return rxrpc_proto_abort("XSA", call, 0);
+ if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
+ rxrpc_proto_abort("XSA", call, 0);
+ goto out;
+ }
rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
&summary);
}
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_end_tx_phase(call, false, "ETA");
- return;
- }
-
if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
- return rxrpc_congestion_management(call, skb, &summary, acked_serial);
+ rxrpc_congestion_management(call, skb, &summary, acked_serial);
+out:
+ spin_unlock(&call->input_lock);
}
/*
@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
_proto("Rx ACKALL %%%u", sp->hdr.serial);
- rxrpc_rotate_tx_window(call, call->tx_top, &summary);
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ spin_lock(&call->input_lock);
+
+ if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
rxrpc_end_tx_phase(call, false, "ETL");
+
+ spin_unlock(&call->input_lock);
}
/*
@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
}
/*
- * Handle a new call on a channel implicitly completing the preceding call on
- * that channel.
+ * Handle a new service call on a channel implicitly completing the preceding
+ * call on that channel. This does not apply to client conns.
*
* TODO: If callNumber > call_id + 1, renegotiate security.
*/
-static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
+static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
+ struct rxrpc_connection *conn,
struct rxrpc_call *call)
{
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
- break;
+ /* Fall through */
case RXRPC_CALL_COMPLETE:
break;
default:
@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
+ trace_rxrpc_improper_term(call);
break;
}
- trace_rxrpc_improper_term(call);
+ spin_lock(&rx->incoming_lock);
__rxrpc_disconnect_call(conn, call);
+ spin_unlock(&rx->incoming_lock);
rxrpc_notify_socket(call);
}
@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* The socket is locked by the caller and this prevents the socket from being
* shut down and the local endpoint from going away, thus sk_user_data will not
* be cleared until this function returns.
+ *
+ * Called with the RCU read lock held from the IP layer via UDP.
*/
-void rxrpc_data_ready(struct sock *udp_sk)
+int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
struct rxrpc_connection *conn;
struct rxrpc_channel *chan;
@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk)
struct rxrpc_local *local = udp_sk->sk_user_data;
struct rxrpc_peer *peer = NULL;
struct rxrpc_sock *rx = NULL;
- struct sk_buff *skb;
unsigned int channel;
- int ret, skew = 0;
+ int skew = 0;
_enter("%p", udp_sk);
- ASSERT(!irqs_disabled());
-
- skb = skb_recv_udp(udp_sk, 0, 1, &ret);
- if (!skb) {
- if (ret == -EAGAIN)
- return;
- _debug("UDP socket error %d", ret);
- return;
- }
-
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
rxrpc_new_skb(skb, rxrpc_skb_rx_received);
- _net("recv skb %p", skb);
-
- /* we'll probably need to checksum it (didn't call sock_recvmsg) */
- if (skb_checksum_complete(skb)) {
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
- __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
- _leave(" [CSUM failed]");
- return;
- }
-
- __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
+ skb_pull(skb, sizeof(struct udphdr));
/* The UDP protocol already released all skb resources;
* we are free to add our own data there.
@@ -1177,10 +1196,12 @@ void rxrpc_data_ready(struct sock *udp_sk)
if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp);
rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
- return;
+ return 0;
}
}
+ if (skb->tstamp == 0)
+ skb->tstamp = ktime_get_real();
trace_rxrpc_rx_packet(sp);
switch (sp->hdr.type) {
@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (sp->hdr.serviceId == 0)
goto bad_message;
- rcu_read_lock();
-
if (rxrpc_to_server(sp)) {
/* Weed out packets to services we're not offering. Packets
* that would begin a call are explicitly rejected and the rest
@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
sp->hdr.seq == 1)
goto unsupported_service;
- goto discard_unlock;
+ goto discard;
}
}
@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk)
goto wrong_security;
if (sp->hdr.serviceId != conn->service_id) {
- if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
- conn->service_id != conn->params.service_id)
+ int old_id;
+
+ if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
+ goto reupgrade;
+ old_id = cmpxchg(&conn->service_id, conn->params.service_id,
+ sp->hdr.serviceId);
+
+ if (old_id != conn->params.service_id &&
+ old_id != sp->hdr.serviceId)
goto reupgrade;
- conn->service_id = sp->hdr.serviceId;
}
if (sp->hdr.callNumber == 0) {
/* Connection-level packet */
_debug("CONN %p {%d}", conn, conn->debug_id);
rxrpc_post_packet_to_conn(conn, skb);
- goto out_unlock;
+ goto out;
}
/* Note the serial number skew here */
@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
/* Ignore really old calls */
if (sp->hdr.callNumber < chan->last_call)
- goto discard_unlock;
+ goto discard;
if (sp->hdr.callNumber == chan->last_call) {
if (chan->call ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
- goto discard_unlock;
+ goto discard;
/* For the previous service call, if completed
* successfully, we discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
chan->last_type == RXRPC_PACKET_TYPE_ACK)
- goto discard_unlock;
+ goto discard;
/* But otherwise we need to retransmit the final packet
* from data cached in the connection record.
@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
sp->hdr.serial,
sp->hdr.flags, 0);
rxrpc_post_packet_to_conn(conn, skb);
- goto out_unlock;
+ goto out;
}
call = rcu_dereference(chan->call);
if (sp->hdr.callNumber > chan->call_id) {
- if (rxrpc_to_client(sp)) {
- rcu_read_unlock();
+ if (rxrpc_to_client(sp))
goto reject_packet;
- }
if (call)
- rxrpc_input_implicit_end_call(conn, call);
+ rxrpc_input_implicit_end_call(rx, conn, call);
call = NULL;
}
@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (!call || atomic_read(&call->usage) == 0) {
if (rxrpc_to_client(sp) ||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
- goto bad_message_unlock;
+ goto bad_message;
if (sp->hdr.seq != 1)
- goto discard_unlock;
- call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
- if (!call) {
- rcu_read_unlock();
+ goto discard;
+ call = rxrpc_new_incoming_call(local, rx, skb);
+ if (!call)
goto reject_packet;
- }
rxrpc_send_ping(call, skb, skew);
mutex_unlock(&call->user_mutex);
}
rxrpc_input_call_packet(call, skb, skew);
- goto discard_unlock;
+ goto discard;
-discard_unlock:
- rcu_read_unlock();
discard:
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
trace_rxrpc_rx_done(0, 0);
- return;
-
-out_unlock:
- rcu_read_unlock();
- goto out;
+ return 0;
wrong_security:
- rcu_read_unlock();
trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RXKADINCONSISTENCY, EBADMSG);
skb->priority = RXKADINCONSISTENCY;
goto post_abort;
unsupported_service:
- rcu_read_unlock();
trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_INVALID_OPERATION, EOPNOTSUPP);
skb->priority = RX_INVALID_OPERATION;
goto post_abort;
reupgrade:
- rcu_read_unlock();
trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
goto protocol_error;
-bad_message_unlock:
- rcu_read_unlock();
bad_message:
trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
@@ -1397,4 +1407,5 @@ reject_packet:
trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_reject_packet(local, skb);
_leave(" [badmsg]");
+ return 0;
}
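A recurring pattern in the input.c changes: rxrpc_rotate_tx_window() now reports, from inside the locked region, whether it rotated past the last packet, so callers act on that return value instead of re-testing a shared flag that a concurrent ACK may be changing. A minimal userspace rendering of the pattern (a pthread mutex standing in for the call lock):

    #include <stdbool.h>
    #include <pthread.h>

    #define ANNO_LAST 0x80
    #define WIN_SZ    64

    struct txwin {
        pthread_mutex_t lock;
        unsigned int head;
        unsigned char annotation[WIN_SZ];
    };

    /* advance the window to 'to'; report whether the last packet was consumed */
    static bool rotate(struct txwin *w, unsigned int to)
    {
        bool rot_last = false;

        pthread_mutex_lock(&w->lock);
        while (w->head != to) {
            if (w->annotation[w->head % WIN_SZ] & ANNO_LAST)
                rot_last = true;   /* observed under the lock */
            w->head++;
        }
        pthread_mutex_unlock(&w->lock);

        return rot_last;           /* callers branch on this, not shared state */
    }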
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 94d234e9c685..cad0691c2bb4 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
+#include <net/udp.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
*/
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
- struct sock *sock;
+ struct sock *usk;
int ret, opt;
_enter("%p{%d,%d}",
@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
return ret;
}
+ /* set the socket up */
+ usk = local->socket->sk;
+ inet_sk(usk)->mc_loop = 0;
+
+ /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+ inet_inc_convert_csum(usk);
+
+ rcu_assign_sk_user_data(usk, local);
+
+ udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
+ udp_sk(usk)->encap_rcv = rxrpc_input_packet;
+ udp_sk(usk)->encap_destroy = NULL;
+ udp_sk(usk)->gro_receive = NULL;
+ udp_sk(usk)->gro_complete = NULL;
+
+ udp_encap_enable();
+#if IS_ENABLED(CONFIG_IPV6)
+ if (local->srx.transport.family == AF_INET6)
+ udpv6_encap_enable();
+#endif
+ usk->sk_error_report = rxrpc_error_report;
+
/* if a local address was supplied then bind it */
if (local->srx.transport_len > sizeof(sa_family_t)) {
_debug("bind");
@@ -191,11 +214,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
BUG();
}
- /* set the socket up */
- sock = local->socket->sk;
- sock->sk_user_data = local;
- sock->sk_data_ready = rxrpc_data_ready;
- sock->sk_error_report = rxrpc_error_report;
_leave(" = 0");
return 0;
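The registration above is the heart of the conversion from a data_ready callback to a UDP encapsulation hook: packets are now handed to rxrpc synchronously in the UDP receive path, already checksum-converted, instead of being queued and dequeued again. Condensed into a reusable shape (a restatement of the hunk, not a different API):

    #include <net/sock.h>
    #include <net/udp.h>
    #include <linux/udp.h>

    /* sketch: turn an ordinary kernel UDP socket into an encap receiver */
    static void sketch_setup_encap(struct socket *sock, void *owner,
                                   int (*rcv)(struct sock *, struct sk_buff *))
    {
        struct sock *usk = sock->sk;

        rcu_assign_sk_user_data(usk, owner);    /* found again via sk_user_data */
        udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
        udp_sk(usk)->encap_rcv = rcv;           /* returns 0 when skb consumed */
        udp_encap_enable();                     /* static key in the UDP rx path */
    }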
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 417d80867c4f..fd7eba8467fa 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -102,6 +102,9 @@ static __net_init int rxrpc_init_net(struct net *net)
proc_create_net("conns", 0444, rxnet->proc_net,
&rxrpc_connection_seq_ops,
sizeof(struct seq_net_private));
+ proc_create_net("peers", 0444, rxnet->proc_net,
+ &rxrpc_peer_seq_ops,
+ sizeof(struct seq_net_private));
return 0;
err_proc:
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 81a7869325a6..7feb611c7258 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -303,6 +303,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
if (rtt < 0)
return;
+ spin_lock(&peer->rtt_input_lock);
+
/* Replace the oldest datum in the RTT buffer */
sum -= peer->rtt_cache[cursor];
sum += rtt;
@@ -314,6 +316,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
peer->rtt_usage = usage;
}
+ spin_unlock(&peer->rtt_input_lock);
+
/* Now recalculate the average */
if (usage == RXRPC_RTT_CACHE_SIZE) {
avg = sum / RXRPC_RTT_CACHE_SIZE;
@@ -322,6 +326,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
do_div(avg, usage);
}
+ /* Don't need to update this under lock */
peer->rtt = avg;
trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
usage, avg);
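rtt_input_lock only needs to cover the ring-buffer update; the average is computed from locals and published with a plain store, which is fine because readers of peer->rtt tolerate slight staleness. The rolling-window arithmetic on its own:

    #include <stdio.h>

    #define CACHE_SIZE 32

    int main(void)
    {
        unsigned long long cache[CACHE_SIZE] = { 0 }, sum = 0, avg;
        unsigned long long samples[] = { 1200, 900, 1500, 1100 }; /* RTTs, us */
        unsigned int cursor = 0, usage = 0, i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            sum -= cache[cursor];          /* evict the oldest datum */
            sum += samples[i];
            cache[cursor] = samples[i];
            cursor = (cursor + 1) % CACHE_SIZE;
            if (usage < CACHE_SIZE)
                usage++;
            avg = sum / usage;             /* the kernel uses do_div() here */
            printf("sample=%llu avg=%llu over %u\n", samples[i], avg, usage);
        }
        return 0;
    }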
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 01a9febfa367..5691b7d266ca 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -153,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
* assess the MTU size for the network interface through which this peer is
* reached
*/
-static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
+static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+ struct rxrpc_peer *peer)
{
+ struct net *net = sock_net(&rx->sk);
struct dst_entry *dst;
struct rtable *rt;
struct flowi fl;
@@ -169,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
switch (peer->srx.transport.family) {
case AF_INET:
rt = ip_route_output_ports(
- &init_net, fl4, NULL,
+ net, fl4, NULL,
peer->srx.transport.sin.sin_addr.s_addr, 0,
htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
if (IS_ERR(rt)) {
@@ -188,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
sizeof(struct in6_addr));
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
- dst = ip6_route_output(&init_net, NULL, fl6);
+ dst = ip6_route_output(net, NULL, fl6);
if (dst->error) {
_leave(" [route err %d]", dst->error);
return;
@@ -223,6 +225,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
+ spin_lock_init(&peer->rtt_input_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
if (RXRPC_TX_SMSS > 2190)
@@ -240,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
/*
* Initialise peer record.
*/
-static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
+static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+ unsigned long hash_key)
{
peer->hash_key = hash_key;
- rxrpc_assess_MTU_size(peer);
+ rxrpc_assess_MTU_size(rx, peer);
peer->mtu = peer->if_mtu;
peer->rtt_last_req = ktime_get_real();
@@ -275,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
/*
* Set up a new peer.
*/
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
struct sockaddr_rxrpc *srx,
unsigned long hash_key,
gfp_t gfp)
@@ -287,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
peer = rxrpc_alloc_peer(local, gfp);
if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx));
- rxrpc_init_peer(peer, hash_key);
+ rxrpc_init_peer(rx, peer, hash_key);
}
_leave(" = %p", peer);
@@ -299,14 +304,15 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
* since we've already done a search in the list from the non-reentrant context
* (the data_ready handler) that is the only place we can add new peers.
*/
-void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
+void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
+ struct rxrpc_peer *peer)
{
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
peer->local = local;
- rxrpc_init_peer(peer, hash_key);
+ rxrpc_init_peer(rx, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
@@ -317,7 +323,8 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
/*
* obtain a remote transport endpoint for the specified address
*/
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
@@ -337,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
/* The peer is not yet present in hash - create a candidate
* for a new record and then redo the search.
*/
- candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
+ candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
if (!candidate) {
_leave(" = NULL [nomem]");
return NULL;
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 9805e3b85c36..c7d976859d40 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -212,3 +212,129 @@ const struct seq_operations rxrpc_connection_seq_ops = {
.stop = rxrpc_connection_seq_stop,
.show = rxrpc_connection_seq_show,
};
+
+/*
+ * generate a list of extant virtual peers in /proc/net/rxrpc/peers
+ */
+static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
+{
+ struct rxrpc_peer *peer;
+ time64_t now;
+ char lbuff[50], rbuff[50];
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq,
+ "Proto Local "
+ " Remote "
+ " Use CW MTU LastUse RTT Rc\n"
+ );
+ return 0;
+ }
+
+ peer = list_entry(v, struct rxrpc_peer, hash_link);
+
+ sprintf(lbuff, "%pISpc", &peer->local->srx.transport);
+
+ sprintf(rbuff, "%pISpc", &peer->srx.transport);
+
+ now = ktime_get_seconds();
+ seq_printf(seq,
+ "UDP %-47.47s %-47.47s %3u"
+ " %3u %5u %6llus %12llu %2u\n",
+ lbuff,
+ rbuff,
+ atomic_read(&peer->usage),
+ peer->cong_cwnd,
+ peer->mtu,
+ now - peer->last_tx_at,
+ peer->rtt,
+ peer->rtt_cursor);
+
+ return 0;
+}
+
+static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
+ __acquires(rcu)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ unsigned int bucket, n;
+ unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
+ void *p;
+
+ rcu_read_lock();
+
+ if (*_pos >= UINT_MAX)
+ return NULL;
+
+ n = *_pos & ((1U << shift) - 1);
+ bucket = *_pos >> shift;
+ for (;;) {
+ if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
+ *_pos = UINT_MAX;
+ return NULL;
+ }
+ if (n == 0) {
+ if (bucket == 0)
+ return SEQ_START_TOKEN;
+ *_pos += 1;
+ n++;
+ }
+
+ p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
+ if (p)
+ return p;
+ bucket++;
+ n = 1;
+ *_pos = (bucket << shift) | n;
+ }
+}
+
+static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ unsigned int bucket, n;
+ unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
+ void *p;
+
+ if (*_pos >= UINT_MAX)
+ return NULL;
+
+ bucket = *_pos >> shift;
+
+ p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
+ if (p)
+ return p;
+
+ for (;;) {
+ bucket++;
+ n = 1;
+ *_pos = (bucket << shift) | n;
+
+ if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
+ *_pos = UINT_MAX;
+ return NULL;
+ }
+ if (n == 0) {
+ *_pos += 1;
+ n++;
+ }
+
+ p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
+ if (p)
+ return p;
+ }
+}
+
+static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+
+const struct seq_operations rxrpc_peer_seq_ops = {
+ .start = rxrpc_peer_seq_start,
+ .next = rxrpc_peer_seq_next,
+ .stop = rxrpc_peer_seq_stop,
+ .show = rxrpc_peer_seq_show,
+};
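
The iterators above fold (bucket, index within bucket) into the single loff_t cursor that seq_file hands back on every call, with index 0 of bucket 0 reserved for the header line. The encoding in isolation; the HASH_BITS value here is an arbitrary stand-in:

#include <stdio.h>

#define HASH_BITS  10				/* log2 of bucket count */
#define SHIFT      (32 - HASH_BITS)		/* low bits hold the index */

static unsigned long long encode(unsigned bucket, unsigned n)
{
	return ((unsigned long long)bucket << SHIFT) | n;
}

static void decode(unsigned long long pos, unsigned *bucket, unsigned *n)
{
	*bucket = pos >> SHIFT;
	*n = pos & ((1U << SHIFT) - 1);
}

int main(void)
{
	unsigned bucket, n;

	decode(encode(3, 7), &bucket, &n);
	printf("bucket=%u n=%u\n", bucket, n);	/* bucket=3 n=7 */
	decode(0, &bucket, &n);
	printf("pos 0 -> bucket=%u n=%u (the SEQ_START_TOKEN slot)\n",
	       bucket, n);
	return 0;
}
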
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index ac79a40a0392..4b28fd44576d 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -391,6 +391,7 @@ static int u32_init(struct tcf_proto *tp)
RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, root_ht);
+ root_ht->refcnt++;
rcu_assign_pointer(tp->root, root_ht);
tp->data = tp_c;
return 0;
@@ -606,7 +607,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
struct tc_u_hnode __rcu **hn;
struct tc_u_hnode *phn;
- WARN_ON(ht->refcnt);
+ WARN_ON(--ht->refcnt);
u32_clear_hnode(tp, ht, extack);
@@ -634,7 +635,7 @@ static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
WARN_ON(root_ht == NULL);
- if (root_ht && --root_ht->refcnt == 0)
+ if (root_ht && --root_ht->refcnt == 1)
u32_destroy_hnode(tp, root_ht, extack);
if (--tp_c->refcnt == 0) {
@@ -679,7 +680,6 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
}
if (ht->refcnt == 1) {
- ht->refcnt--;
u32_destroy_hnode(tp, ht, extack);
} else {
NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
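
The u32 changes make the root hnode hold a reference on itself from u32_init(), so every destroy path can drop exactly one reference and WARN_ON(--ht->refcnt) catches real imbalances; u32_destroy() accordingly now tests for refcnt 1, the self-reference. The ownership rule, reduced to a toy (non-atomic, single-threaded):

#include <assert.h>
#include <stdlib.h>

struct hnode { unsigned int refcnt; };

static struct hnode *hnode_create(void)
{
	struct hnode *ht = calloc(1, sizeof(*ht));

	if (!ht)
		abort();
	ht->refcnt = 1;		/* the table's reference on itself */
	return ht;
}

static void hnode_destroy(struct hnode *ht)
{
	/* Dropping the self-reference must leave the count at zero,
	 * mirroring WARN_ON(--ht->refcnt) in u32_destroy_hnode(). */
	assert(--ht->refcnt == 0);
	free(ht);
}

int main(void)
{
	struct hnode *root = hnode_create();

	root->refcnt++;		/* a filter links to the table */
	root->refcnt--;		/* the filter is deleted */
	hnode_destroy(root);	/* symmetric: drops the init-time ref */
	return 0;
}
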
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index dc539295ae65..b910cd5c56f7 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2644,7 +2644,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
for (i = 1; i <= CAKE_QUEUES; i++)
quantum_div[i] = 65535 / i;
- q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data),
+ q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
GFP_KERNEL);
if (!q->tins)
goto nomem;
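
kvcalloc() is the two-argument, overflow-checked spelling of the same zeroing allocation; like userspace calloc() it refuses a count * size product that would wrap. The check amounts to:

#include <stdint.h>
#include <stdlib.h>

/* Sketch of the overflow check a calloc-style allocator performs. */
static void *checked_calloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);		/* zeroed, like kvzalloc */
}

/* usage: tins = checked_calloc(CAKE_MAX_TINS, sizeof(*tins)); */
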
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 338222a6c664..4b1af706896c 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -92,8 +92,8 @@ struct fq_sched_data {
u32 quantum;
u32 initial_quantum;
u32 flow_refill_delay;
- u32 flow_max_rate; /* optional max rate per flow */
u32 flow_plimit; /* max packets per flow */
+ unsigned long flow_max_rate; /* optional max rate per flow */
u32 orphan_mask; /* mask for orphaned skb */
u32 low_rate_threshold;
struct rb_root *fq_root;
@@ -416,7 +416,8 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
- u32 rate, plen;
+ unsigned long rate;
+ u32 plen;
skb = fq_dequeue_head(sch, &q->internal);
if (skb)
@@ -443,7 +444,7 @@ begin:
}
skb = f->head;
- if (skb && !skb_is_tcp_pure_ack(skb)) {
+ if (skb) {
u64 time_next_packet = max_t(u64, ktime_to_ns(skb->tstamp),
f->time_next_packet);
@@ -485,11 +486,11 @@ begin:
if (f->credit > 0)
goto out;
}
- if (rate != ~0U) {
+ if (rate != ~0UL) {
u64 len = (u64)plen * NSEC_PER_SEC;
if (likely(rate))
- do_div(len, rate);
+ len = div64_ul(len, rate);
/* Since socket rate can change later,
* clamp the delay to 1 second.
* Really, providers of too big packets should be fixed !
@@ -701,9 +702,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
- if (tb[TCA_FQ_FLOW_MAX_RATE])
- q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+ if (tb[TCA_FQ_FLOW_MAX_RATE]) {
+ u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+ q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
+ }
if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
q->low_rate_threshold =
nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
@@ -766,7 +769,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
q->quantum = 2 * psched_mtu(qdisc_dev(sch));
q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
q->flow_refill_delay = msecs_to_jiffies(40);
- q->flow_max_rate = ~0U;
+ q->flow_max_rate = ~0UL;
q->time_next_delayed_flow = ~0ULL;
q->rate_enable = 1;
q->new_flows.first = NULL;
@@ -802,7 +805,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
- nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
+ min_t(unsigned long, q->flow_max_rate, ~0U)) ||
nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
jiffies_to_usecs(q->flow_refill_delay)) ||
nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
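
flow_max_rate widens from u32 to unsigned long, so the all-ones sentinel must widen with it: ~0U does not equal ~0UL once the field is long, which is why fq_change() maps a userspace ~0U to ~0UL, the dequeue path compares against ~0UL, and fq_dump() clamps back to 32 bits. do_div() also becomes div64_ul() because do_div() only takes a 32-bit divisor. A standalone demonstration of the sentinel pitfall (assumes LP64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long rate = ~0UL;	/* "no limit" after the field widened */

	/* ~0U converts to 0xffffffff, so the old test misfires: */
	printf("rate == ~0U  -> %d\n", rate == ~0U);	/* 0 on LP64 */
	printf("rate == ~0UL -> %d\n", rate == ~0UL);	/* 1 */

	/* Dumping back into a u32 netlink attribute needs a clamp,
	 * mirroring min_t(unsigned long, q->flow_max_rate, ~0U): */
	uint32_t dumped = rate > UINT32_MAX ? UINT32_MAX : (uint32_t)rate;
	printf("dumped = %#x\n", dumped);
	return 0;
}
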
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3023929852e8..de1663f7d3ad 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -572,6 +572,18 @@ struct Qdisc noop_qdisc = {
.dev_queue = &noop_netdev_queue,
.running = SEQCNT_ZERO(noop_qdisc.running),
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
+ .gso_skb = {
+ .next = (struct sk_buff *)&noop_qdisc.gso_skb,
+ .prev = (struct sk_buff *)&noop_qdisc.gso_skb,
+ .qlen = 0,
+ .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
+ },
+ .skb_bad_txq = {
+ .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
+ .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
+ .qlen = 0,
+ .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
+ },
};
EXPORT_SYMBOL(noop_qdisc);
@@ -1273,8 +1285,6 @@ static void dev_init_scheduler_queue(struct net_device *dev,
rcu_assign_pointer(dev_queue->qdisc, qdisc);
dev_queue->qdisc_sleeping = qdisc;
- __skb_queue_head_init(&qdisc->gso_skb);
- __skb_queue_head_init(&qdisc->skb_bad_txq);
}
void dev_init_scheduler(struct net_device *dev)
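
noop_qdisc is a file-scope object, so its gso_skb and skb_bad_txq queues can be made empty at build time by pointing next/prev back at the object itself, exactly what __skb_queue_head_init() would do at run time; dev_init_scheduler_queue() then no longer writes to the shared noop_qdisc, which it can be handed for multiple devices. The self-referential static initialiser is ordinary C:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Same trick as the noop_qdisc initialiser: a static object may
 * legally refer to its own address in its own initialiser. */
static struct list_head queue = {
	.next = &queue,
	.prev = &queue,
};

int main(void)
{
	printf("empty at startup: %d\n", queue.next == &queue);	/* 1 */
	return 0;
}
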
diff --git a/net/tipc/link.c b/net/tipc/link.c
index fb886b525d95..f6552e4f4b43 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -477,6 +477,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
l->in_session = false;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
+ if (bc_rcvlink)
+ bc_rcvlink->tolerance = tolerance;
l->net_plane = net_plane;
l->advertised_mtu = mtu;
l->mtu = mtu;
@@ -843,14 +845,21 @@ static void link_prepare_wakeup(struct tipc_link *l)
void tipc_link_reset(struct tipc_link *l)
{
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+
l->in_session = false;
l->session++;
l->mtu = l->advertised_mtu;
+
spin_lock_bh(&l->wakeupq.lock);
+ skb_queue_splice_init(&l->wakeupq, &list);
+ spin_unlock_bh(&l->wakeupq.lock);
+
spin_lock_bh(&l->inputq->lock);
- skb_queue_splice_init(&l->wakeupq, l->inputq);
+ skb_queue_splice_init(&list, l->inputq);
spin_unlock_bh(&l->inputq->lock);
- spin_unlock_bh(&l->wakeupq.lock);
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
@@ -1031,7 +1040,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
/* Detect repeated retransmit failures on same packet */
if (r->last_retransm != buf_seqno(skb)) {
r->last_retransm = buf_seqno(skb);
- r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+ r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
link_retransmit_failure(l, skb);
if (link_is_bc_sndlink(l))
@@ -1576,9 +1585,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own priority if peer's priority is higher */
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
@@ -1604,9 +1614,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
l->rcv_nxt_state = msg_seqno(hdr) + 1;
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own prio if peer indicates a different value */
if ((peers_prio != l->priority) &&
in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
@@ -2223,6 +2234,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq)
{
l->tolerance = tol;
+ if (l->bc_rcvlink)
+ l->bc_rcvlink->tolerance = tol;
if (link_is_up(l))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
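
tipc_link_reset() previously took wakeupq.lock and inputq->lock nested; splicing into an on-stack list first means only one queue lock is held at any moment, removing the lock-order dependency between the two queues. The shape of the fix, reduced to a sketch (hypothetical list type, pthread mutexes for the queue spinlocks):

#include <pthread.h>

struct skb_list {
	pthread_mutex_t lock;
	int head;	/* stand-in for the queued skbs */
};

/* Move everything from src to dst without nesting their locks. */
static void splice_without_nesting(struct skb_list *src, struct skb_list *dst)
{
	int tmp;

	pthread_mutex_lock(&src->lock);
	tmp = src->head;		/* splice src onto a private list */
	src->head = 0;
	pthread_mutex_unlock(&src->lock);

	pthread_mutex_lock(&dst->lock);	/* only one lock held at a time */
	dst->head += tmp;
	pthread_mutex_unlock(&dst->lock);
}
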
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index db148c4a916a..de09f514428c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1198,6 +1198,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
* @skb: pointer to message buffer.
*/
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
@@ -1215,7 +1216,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
tsk_peer_port(tsk));
sk->sk_state_change(sk);
- goto exit;
+
+ /* State change is ignored if socket already awake,
+ * - convert msg to abort msg and add to inqueue
+ */
+ msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+ msg_set_type(hdr, TIPC_CONN_MSG);
+ msg_set_size(hdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ __skb_queue_tail(inputq, skb);
+ return;
}
tsk->probe_unacked = false;
@@ -1943,7 +1953,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
switch (msg_user(hdr)) {
case CONN_MANAGER:
- tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
+ tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 9783101bc4a9..10dc59ce9c82 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -650,6 +650,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
u8 node_id[NODE_ID_LEN] = {0,};
+ int rmcast = 0;
ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
if (!ub)
@@ -680,6 +681,9 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (err)
goto err;
+ /* Checking remote ip address */
+ rmcast = tipc_udp_is_mcast_addr(&remote);
+
/* Autoconfigure own node identity if needed */
if (!tipc_own_id(net)) {
memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
@@ -705,7 +709,12 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
goto err;
}
udp_conf.family = AF_INET;
- udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+
+ /* Switch to use ANY to receive packets from group */
+ if (rmcast)
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ else
+ udp_conf.local_ip.s_addr = local.ipv4.s_addr;
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
@@ -719,7 +728,10 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.family = AF_INET6;
udp_conf.use_udp6_tx_checksums = true;
udp_conf.use_udp6_rx_checksums = true;
- udp_conf.local_ip6 = in6addr_any;
+ if (rmcast)
+ udp_conf.local_ip6 = in6addr_any;
+ else
+ udp_conf.local_ip6 = local.ipv6;
b->mtu = 1280;
#endif
} else {
@@ -741,7 +753,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
* is used if it's a multicast address.
*/
memcpy(&b->bcast_addr.value, &remote, sizeof(remote));
- if (tipc_udp_is_mcast_addr(&remote))
+ if (rmcast)
err = enable_mcast(ub, &remote);
else
err = tipc_udp_rcast_add(b, &remote);
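
When the configured remote is unicast (replicast mode), the tunnel socket now binds to the configured local address instead of ANY; ANY remains necessary only when packets must be accepted for a multicast group. The same decision in a plain socket sketch; the port number and addresses here are illustrative only:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int tipc_like_bind(const char *local_ip, int remote_is_mcast)
{
	struct sockaddr_in sa;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(6118);	/* illustrative UDP port */
	if (remote_is_mcast)		/* receive from the whole group */
		sa.sin_addr.s_addr = htonl(INADDR_ANY);
	else				/* pin to the configured address */
		inet_pton(AF_INET, local_ip, &sa.sin_addr);
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int fd = tipc_like_bind("192.0.2.1", 0);	/* unicast remote */

	if (fd >= 0)
		close(fd);
	return 0;
}
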
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a88551f3bc43..5bd01058b9e6 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1019,36 +1019,49 @@ void cfg80211_cqm_config_free(struct wireless_dev *wdev)
wdev->cqm_config = NULL;
}
-void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
ASSERT_RTNL();
- if (WARN_ON(wdev->netdev))
- return;
-
nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
list_del_rcu(&wdev->list);
- synchronize_rcu();
+ if (sync)
+ synchronize_rcu();
rdev->devlist_generation++;
+ cfg80211_mlme_purge_registrations(wdev);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_DEVICE:
- cfg80211_mlme_purge_registrations(wdev);
cfg80211_stop_p2p_device(rdev, wdev);
break;
case NL80211_IFTYPE_NAN:
cfg80211_stop_nan(rdev, wdev);
break;
default:
- WARN_ON_ONCE(1);
break;
}
+#ifdef CONFIG_CFG80211_WEXT
+ kzfree(wdev->wext.keys);
+#endif
+ /* only initialized if we have a netdev */
+ if (wdev->netdev)
+ flush_work(&wdev->disconnect_wk);
+
cfg80211_cqm_config_free(wdev);
}
+
+void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+ if (WARN_ON(wdev->netdev))
+ return;
+
+ __cfg80211_unregister_wdev(wdev, true);
+}
EXPORT_SYMBOL(cfg80211_unregister_wdev);
static const struct device_type wiphy_type = {
@@ -1153,6 +1166,30 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
}
EXPORT_SYMBOL(cfg80211_stop_iface);
+void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
+{
+ mutex_init(&wdev->mtx);
+ INIT_LIST_HEAD(&wdev->event_list);
+ spin_lock_init(&wdev->event_lock);
+ INIT_LIST_HEAD(&wdev->mgmt_registrations);
+ spin_lock_init(&wdev->mgmt_registrations_lock);
+
+ /*
+ * We get here also when the interface changes network namespaces,
+ * as it's registered into the new one, but we don't want it to
+ * change ID in that case. Checking if the ID is already assigned
+ * works, because 0 isn't considered a valid ID and the memory is
+ * 0-initialized.
+ */
+ if (!wdev->identifier)
+ wdev->identifier = ++rdev->wdev_id;
+ list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
+ rdev->devlist_generation++;
+
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
+}
+
static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state, void *ptr)
{
@@ -1178,23 +1215,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
* called within code protected by it when interfaces
* are added with nl80211.
*/
- mutex_init(&wdev->mtx);
- INIT_LIST_HEAD(&wdev->event_list);
- spin_lock_init(&wdev->event_lock);
- INIT_LIST_HEAD(&wdev->mgmt_registrations);
- spin_lock_init(&wdev->mgmt_registrations_lock);
-
- /*
- * We get here also when the interface changes network namespaces,
- * as it's registered into the new one, but we don't want it to
- * change ID in that case. Checking if the ID is already assigned
- * works, because 0 isn't considered a valid ID and the memory is
- * 0-initialized.
- */
- if (!wdev->identifier)
- wdev->identifier = ++rdev->wdev_id;
- list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
- rdev->devlist_generation++;
/* can only change netns with wiphy */
dev->features |= NETIF_F_NETNS_LOCAL;
@@ -1223,7 +1243,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
- nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
+ cfg80211_init_wdev(rdev, wdev);
break;
case NETDEV_GOING_DOWN:
cfg80211_leave(rdev, wdev);
@@ -1238,7 +1258,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
list_for_each_entry_safe(pos, tmp,
&rdev->sched_scan_req_list, list) {
- if (WARN_ON(pos && pos->dev == wdev->netdev))
+ if (WARN_ON(pos->dev == wdev->netdev))
cfg80211_stop_sched_scan_req(rdev, pos, false);
}
@@ -1302,17 +1322,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
* remove and clean it up.
*/
if (!list_empty(&wdev->list)) {
- nl80211_notify_iface(rdev, wdev,
- NL80211_CMD_DEL_INTERFACE);
+ __cfg80211_unregister_wdev(wdev, false);
sysfs_remove_link(&dev->dev.kobj, "phy80211");
- list_del_rcu(&wdev->list);
- rdev->devlist_generation++;
- cfg80211_mlme_purge_registrations(wdev);
-#ifdef CONFIG_CFG80211_WEXT
- kzfree(wdev->wext.keys);
-#endif
- flush_work(&wdev->disconnect_wk);
- cfg80211_cqm_config_free(wdev);
}
/*
* synchronise (so that we won't find this netdev
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7f52ef569320..c61dbba8bf47 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -66,6 +66,7 @@ struct cfg80211_registered_device {
/* protected by RTNL only */
int num_running_ifaces;
int num_running_monitor_ifaces;
+ u64 cookie_counter;
/* BSSes/scanning */
spinlock_t bss_lock;
@@ -133,6 +134,16 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
#endif
}
+static inline u64 cfg80211_assign_cookie(struct cfg80211_registered_device *rdev)
+{
+ u64 r = ++rdev->cookie_counter;
+
+ if (WARN_ON(r == 0))
+ r = ++rdev->cookie_counter;
+
+ return r;
+}
+
extern struct workqueue_struct *cfg80211_wq;
extern struct list_head cfg80211_rdev_list;
extern int cfg80211_rdev_list_generation;
@@ -187,6 +198,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
struct net *net);
+void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
static inline void wdev_lock(struct wireless_dev *wdev)
__acquires(wdev)
{
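
cfg80211_assign_cookie(), added to core.h above, hands out strictly non-zero u64 cookies, since 0 doubles as "no cookie"; if the counter ever wrapped it would warn and skip 0. The allocator in isolation:

#include <stdint.h>
#include <stdio.h>

static uint64_t cookie_counter;

/* Non-zero cookie allocator: 0 is reserved for "no cookie". */
static uint64_t assign_cookie(void)
{
	uint64_t r = ++cookie_counter;

	if (r == 0)		/* u64 wrapped; practically unreachable */
		r = ++cookie_counter;
	return r;
}

int main(void)
{
	cookie_counter = UINT64_MAX;	/* force the wrap for the demo */
	printf("%llu\n", (unsigned long long)assign_cookie());	/* 1, not 0 */
	return 0;
}
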
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index e6bce1f130c9..b5e235573c8a 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -30,7 +30,7 @@
#include <net/iw_handler.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
+#include <linux/crypto.h>
#include <linux/crc32.h>
#include <net/lib80211.h>
@@ -64,9 +64,9 @@ struct lib80211_tkip_data {
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_cipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_cipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
@@ -99,8 +99,7 @@ static void *lib80211_tkip_init(int key_idx)
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
priv->tx_tfm_arc4 = NULL;
goto fail;
@@ -112,8 +111,7 @@ static void *lib80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
priv->rx_tfm_arc4 = NULL;
goto fail;
@@ -130,9 +128,9 @@ static void *lib80211_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_cipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_cipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -144,9 +142,9 @@ static void lib80211_tkip_deinit(void *priv)
struct lib80211_tkip_data *_priv = priv;
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_cipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_cipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -344,12 +342,10 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_tkip_data *tkey = priv;
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
int len;
u8 rc4key[16], *pos, *icv;
u32 crc;
- struct scatterlist sg;
- int err;
+ int i;
if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -374,14 +370,10 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, len + 4);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- return err;
+ crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ for (i = 0; i < len + 4; i++)
+ crypto_cipher_encrypt_one(tkey->tx_tfm_arc4, pos + i, pos + i);
+ return 0;
}
/*
@@ -400,7 +392,6 @@ static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_tkip_data *tkey = priv;
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
u8 rc4key[16];
u8 keyidx, *pos;
u32 iv32;
@@ -408,9 +399,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct ieee80211_hdr *hdr;
u8 icv[4];
u32 crc;
- struct scatterlist sg;
int plen;
- int err;
+ int i;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -463,18 +453,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, plen + 4);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err) {
- net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
- hdr->addr2);
- return -7;
- }
+ crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ for (i = 0; i < plen + 4; i++)
+ crypto_cipher_decrypt_one(tkey->rx_tfm_arc4, pos + i, pos + i);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
@@ -660,9 +641,9 @@ static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
struct lib80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_cipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_cipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
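
The "ecb(arc4)" skcipher is really the bare RC4 stream keyed once and run over the buffer, so converting to crypto_cipher and calling crypto_cipher_encrypt_one() per byte is behaviour-preserving: the tfm carries the (S, i, j) state across calls, and the keystream depends only on how many bytes have been produced, not on chunking. A standalone RC4 checking that a one-shot pass and a byte-per-call loop agree:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rc4 { uint8_t S[256], i, j; };

static void rc4_setkey(struct rc4 *c, const uint8_t *key, size_t klen)
{
	unsigned int i, j = 0;

	for (i = 0; i < 256; i++)
		c->S[i] = i;
	for (i = 0; i < 256; i++) {
		uint8_t t = c->S[i];

		j = (j + t + key[i % klen]) & 0xff;
		c->S[i] = c->S[j];
		c->S[j] = t;
	}
	c->i = c->j = 0;
}

static uint8_t rc4_byte(struct rc4 *c)	/* one keystream byte */
{
	uint8_t t;

	c->i = (c->i + 1) & 0xff;
	c->j = (c->j + c->S[c->i]) & 0xff;
	t = c->S[c->i];
	c->S[c->i] = c->S[c->j];
	c->S[c->j] = t;
	return c->S[(uint8_t)(c->S[c->i] + c->S[c->j])];
}

static void rc4_crypt(struct rc4 *c, uint8_t *buf, size_t len)
{
	while (len--)
		*buf++ ^= rc4_byte(c);
}

int main(void)
{
	const uint8_t key[16] = "0123456789abcdef";
	uint8_t a[32], b[32];
	struct rc4 c;
	size_t n;

	memset(a, 0xaa, sizeof(a));
	memcpy(b, a, sizeof(b));

	rc4_setkey(&c, key, sizeof(key));
	rc4_crypt(&c, a, sizeof(a));		/* "skcipher": one pass */

	rc4_setkey(&c, key, sizeof(key));
	for (n = 0; n < sizeof(b); n++)		/* encrypt_one() per byte */
		rc4_crypt(&c, b + n, 1);

	printf("byte-wise == one-shot: %s\n",
	       memcmp(a, b, sizeof(a)) == 0 ? "yes" : "no");
	return 0;
}
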
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index d05f58b0fd04..6015f6b542a6 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -22,7 +22,7 @@
#include <net/lib80211.h>
-#include <crypto/skcipher.h>
+#include <linux/crypto.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
@@ -35,8 +35,8 @@ struct lib80211_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_cipher *tx_tfm;
+ struct crypto_cipher *rx_tfm;
};
static void *lib80211_wep_init(int keyidx)
@@ -48,13 +48,13 @@ static void *lib80211_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm)) {
priv->rx_tfm = NULL;
goto fail;
@@ -66,8 +66,8 @@ static void *lib80211_wep_init(int keyidx)
fail:
if (priv) {
- crypto_free_skcipher(priv->tx_tfm);
- crypto_free_skcipher(priv->rx_tfm);
+ crypto_free_cipher(priv->tx_tfm);
+ crypto_free_cipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
@@ -77,8 +77,8 @@ static void lib80211_wep_deinit(void *priv)
{
struct lib80211_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_cipher(_priv->tx_tfm);
+ crypto_free_cipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -129,12 +129,10 @@ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_wep_data *wep = priv;
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
u32 crc, klen, len;
u8 *pos, *icv;
- struct scatterlist sg;
u8 key[WEP_KEY_LEN + 3];
- int err;
+ int i;
/* other checks are in lib80211_wep_build_iv */
if (skb_tailroom(skb) < 4)
@@ -162,14 +160,12 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
- sg_init_one(&sg, pos, len + 4);
- skcipher_request_set_tfm(req, wep->tx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- return err;
+ crypto_cipher_setkey(wep->tx_tfm, key, klen);
+
+ for (i = 0; i < len + 4; i++)
+ crypto_cipher_encrypt_one(wep->tx_tfm, pos + i, pos + i);
+
+ return 0;
}
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
@@ -182,12 +178,10 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_wep_data *wep = priv;
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
u32 crc, klen, plen;
u8 key[WEP_KEY_LEN + 3];
u8 keyidx, *pos, icv[4];
- struct scatterlist sg;
- int err;
+ int i;
if (skb->len < hdr_len + 8)
return -1;
@@ -208,15 +202,9 @@ static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
/* Apply RC4 to data and compute CRC32 over decrypted data */
plen = skb->len - hdr_len - 8;
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
- sg_init_one(&sg, pos, plen + 4);
- skcipher_request_set_tfm(req, wep->rx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err)
- return -7;
+ crypto_cipher_setkey(wep->rx_tfm, key, klen);
+ for (i = 0; i < plen + 4; i++)
+ crypto_cipher_decrypt_one(wep->rx_tfm, pos + i, pos + i);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a02bbdd1b192..744b5851bbf9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -200,7 +200,46 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
return __cfg80211_rdev_from_attrs(netns, info->attrs);
}
+static int validate_ie_attr(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *pos;
+ int len;
+
+ pos = nla_data(attr);
+ len = nla_len(attr);
+
+ while (len) {
+ u8 elemlen;
+
+ if (len < 2)
+ goto error;
+ len -= 2;
+
+ elemlen = pos[1];
+ if (elemlen > len)
+ goto error;
+
+ len -= elemlen;
+ pos += 2 + elemlen;
+ }
+
+ return 0;
+error:
+ NL_SET_ERR_MSG_ATTR(extack, attr, "malformed information elements");
+ return -EINVAL;
+}
+
/* policy for the attributes */
+static const struct nla_policy
+nl80211_ftm_responder_policy[NL80211_FTM_RESP_ATTR_MAX + 1] = {
+ [NL80211_FTM_RESP_ATTR_ENABLED] = { .type = NLA_FLAG, },
+ [NL80211_FTM_RESP_ATTR_LCI] = { .type = NLA_BINARY,
+ .len = U8_MAX },
+ [NL80211_FTM_RESP_ATTR_CIVICLOC] = { .type = NLA_BINARY,
+ .len = U8_MAX },
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
@@ -213,14 +252,14 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 },
[NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 },
- [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 },
- [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
+ [NL80211_ATTR_WIPHY_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_ATTR_WIPHY_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1),
[NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
[NL80211_ATTR_WIPHY_DYN_ACK] = { .type = NLA_FLAG },
- [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
+ [NL80211_ATTR_IFTYPE] = NLA_POLICY_MAX(NLA_U32, NL80211_IFTYPE_MAX),
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
@@ -230,24 +269,28 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
.len = WLAN_MAX_KEY_LEN },
- [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
+ [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 5),
[NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
[NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
- [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
+ [NL80211_ATTR_KEY_TYPE] =
+ NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES),
[NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
[NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_BEACON_TAIL] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_STA_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_BEACON_TAIL] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
+ [NL80211_ATTR_STA_AID] =
+ NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 },
[NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_RATES },
- [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 },
+ [NL80211_ATTR_STA_PLINK_ACTION] =
+ NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_ACTIONS - 1),
[NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
@@ -270,8 +313,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
[NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
- [NL80211_ATTR_IE] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+ validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
[NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED },
@@ -281,7 +325,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG },
[NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG },
- [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 },
+ [NL80211_ATTR_USE_MFP] = NLA_POLICY_RANGE(NLA_U32,
+ NL80211_MFP_NO,
+ NL80211_MFP_OPTIONAL),
[NL80211_ATTR_STA_FLAGS2] = {
.len = sizeof(struct nl80211_sta_flag_update),
},
@@ -301,7 +347,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_FRAME] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
- [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
+ [NL80211_ATTR_PS_STATE] = NLA_POLICY_RANGE(NLA_U32,
+ NL80211_PS_DISABLED,
+ NL80211_PS_ENABLED),
[NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
[NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
[NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
@@ -314,15 +362,23 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG },
[NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
[NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
- [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
+ [NL80211_ATTR_STA_PLINK_STATE] =
+ NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1),
+ [NL80211_ATTR_MESH_PEER_AID] =
+ NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
- [NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 },
- [NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_HIDDEN_SSID] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_HIDDEN_SSID_NOT_IN_USE,
+ NL80211_HIDDEN_SSID_ZERO_CONTENTS),
+ [NL80211_ATTR_IE_PROBE_RESP] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
+ [NL80211_ATTR_IE_ASSOC_RESP] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
[NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED },
[NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG },
@@ -348,9 +404,12 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
[NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
- [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
- [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
- [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
+ [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127),
+ [NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_MESH_POWER_UNKNOWN + 1,
+ NL80211_MESH_POWER_MAX),
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -363,7 +422,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MDID] = { .type = NLA_U16 },
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_PEER_AID] =
+ NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
[NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
[NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
@@ -384,8 +444,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG },
[NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
[NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG },
- [NL80211_ATTR_TSID] = { .type = NLA_U8 },
- [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
+ [NL80211_ATTR_TSID] = NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_TIDS - 1),
+ [NL80211_ATTR_USER_PRIO] =
+ NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
[NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
[NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
[NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
@@ -395,12 +456,13 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
[NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
[NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
- [NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 },
+ [NL80211_ATTR_STA_SUPPORT_P2P_PS] =
+ NLA_POLICY_MAX(NLA_U8, NUM_NL80211_P2P_PS_STATUS - 1),
[NL80211_ATTR_MU_MIMO_GROUP_DATA] = {
.len = VHT_MUMIMO_GROUPS_DATA_LEN
},
[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN },
- [NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 },
+ [NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1),
[NL80211_ATTR_BANDS] = { .type = NLA_U32 },
[NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
[NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
@@ -430,6 +492,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
[NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY,
.len = NL80211_HE_MAX_CAPABILITY_LEN },
+
+ [NL80211_ATTR_FTM_RESPONDER] = {
+ .type = NLA_NESTED,
+ .validation_data = nl80211_ftm_responder_policy,
+ },
};
/* policy for the key attributes */
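
The policy-table conversions above (NLA_POLICY_MIN/MAX/RANGE and NLA_POLICY_VALIDATE_FN) let the generic netlink core reject out-of-range attributes before a handler runs, which is what allows the scattered "if (val > MAX) return -EINVAL" blocks to be deleted later in this patch. Table-driven range checking in miniature; the attribute set and bounds here are hypothetical:

#include <stdint.h>
#include <stdio.h>

enum { ATTR_RETRY_SHORT, ATTR_P2P_CTWINDOW, NUM_ATTRS };

struct range_policy { uint32_t min, max; };

/* Analogues of NLA_POLICY_MIN(NLA_U8, 1) and NLA_POLICY_MAX(NLA_U8, 127). */
static const struct range_policy policy[NUM_ATTRS] = {
	[ATTR_RETRY_SHORT]  = { .min = 1, .max = UINT8_MAX },
	[ATTR_P2P_CTWINDOW] = { .min = 0, .max = 127 },
};

static int validate_attr(unsigned int attr, uint32_t val)
{
	if (val < policy[attr].min || val > policy[attr].max)
		return -22;			/* -EINVAL */
	return 0;
}

int main(void)
{
	printf("%d %d\n",
	       validate_attr(ATTR_RETRY_SHORT, 0),	/* -22: below min */
	       validate_attr(ATTR_P2P_CTWINDOW, 100));	/* 0: in range */
	return 0;
}
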
@@ -440,7 +507,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
[NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
- [NL80211_KEY_TYPE] = { .type = NLA_U32 },
+ [NL80211_KEY_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES - 1),
[NL80211_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
};
@@ -491,7 +558,10 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
static const struct nla_policy
nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
[NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 },
- [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_CONDITION] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_COALESCE_CONDITION_MATCH,
+ NL80211_COALESCE_CONDITION_NO_MATCH),
[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED },
};
@@ -567,8 +637,7 @@ nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
[NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
};
-static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
+static int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
struct wireless_dev **wdev)
{
@@ -582,7 +651,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
return err;
*wdev = __cfg80211_wdev_from_attrs(
- sock_net(skb->sk),
+ sock_net(cb->skb->sk),
genl_family_attrbuf(&nl80211_fam));
if (IS_ERR(*wdev))
return PTR_ERR(*wdev);
@@ -614,36 +683,6 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
return 0;
}
-/* IE validation */
-static bool is_valid_ie_attr(const struct nlattr *attr)
-{
- const u8 *pos;
- int len;
-
- if (!attr)
- return true;
-
- pos = nla_data(attr);
- len = nla_len(attr);
-
- while (len) {
- u8 elemlen;
-
- if (len < 2)
- return false;
- len -= 2;
-
- elemlen = pos[1];
- if (elemlen > len)
- return false;
-
- len -= elemlen;
- pos += 2 + elemlen;
- }
-
- return true;
-}
-
/* message building helper */
static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
int flags, u8 cmd)
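
validate_ie_attr(), added near the top of this file and attached to the IE-carrying attributes via NLA_POLICY_VALIDATE_FN, replaces the open-coded is_valid_ie_attr() deleted above. 802.11 information elements are TLVs (1-byte id, 1-byte length, then the body), and the check confirms each declared body fits in what remains of the buffer. The same walk as a standalone checker:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 802.11 IEs are TLVs: 1-byte id, 1-byte length, then 'length' bytes. */
static int ies_ok(const uint8_t *pos, size_t len)
{
	while (len) {
		uint8_t elemlen;

		if (len < 2)
			return 0;	/* truncated element header */
		len -= 2;
		elemlen = pos[1];
		if (elemlen > len)
			return 0;	/* body overruns the buffer */
		len -= elemlen;
		pos += 2 + elemlen;
	}
	return 1;
}

int main(void)
{
	const uint8_t good[] = { 0x00, 0x03, 's', 's', 'i' };	/* SSID IE */
	const uint8_t bad[]  = { 0x00, 0x09, 's', 's', 'i' };	/* lies about len */

	printf("%d %d\n", ies_ok(good, sizeof(good)),	/* 1 */
			  ies_ok(bad, sizeof(bad)));	/* 0 */
	return 0;
}
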
@@ -858,12 +897,8 @@ static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
if (tb[NL80211_KEY_CIPHER])
k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]);
- if (tb[NL80211_KEY_TYPE]) {
+ if (tb[NL80211_KEY_TYPE])
k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
- if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
- return genl_err_attr(info, -EINVAL,
- tb[NL80211_KEY_TYPE]);
- }
if (tb[NL80211_KEY_DEFAULT_TYPES]) {
struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
@@ -910,13 +945,8 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
if (k->defmgmt)
k->def_multi = true;
- if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
+ if (info->attrs[NL80211_ATTR_KEY_TYPE])
k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
- if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) {
- GENL_SET_ERR_MSG(info, "key type out of range");
- return -EINVAL;
- }
- }
if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
@@ -2292,12 +2322,14 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
struct genl_info *info,
struct cfg80211_chan_def *chandef)
{
+ struct netlink_ext_ack *extack = info->extack;
+ struct nlattr **attrs = info->attrs;
u32 control_freq;
- if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
+ if (!attrs[NL80211_ATTR_WIPHY_FREQ])
return -EINVAL;
- control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]);
chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -2305,14 +2337,16 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->center_freq2 = 0;
/* Primary channel not allowed */
- if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED)
+ if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) {
+ NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
+ "Channel is disabled");
return -EINVAL;
+ }
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
enum nl80211_channel_type chantype;
- chantype = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ chantype = nla_get_u32(attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
switch (chantype) {
case NL80211_CHAN_NO_HT:
@@ -2322,42 +2356,56 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
cfg80211_chandef_create(chandef, chandef->chan,
chantype);
/* user input for center_freq is incorrect */
- if (info->attrs[NL80211_ATTR_CENTER_FREQ1] &&
- chandef->center_freq1 != nla_get_u32(
- info->attrs[NL80211_ATTR_CENTER_FREQ1]))
+ if (attrs[NL80211_ATTR_CENTER_FREQ1] &&
+ chandef->center_freq1 != nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1])) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_CENTER_FREQ1],
+ "bad center frequency 1");
return -EINVAL;
+ }
/* center_freq2 must be zero */
- if (info->attrs[NL80211_ATTR_CENTER_FREQ2] &&
- nla_get_u32(info->attrs[NL80211_ATTR_CENTER_FREQ2]))
+ if (attrs[NL80211_ATTR_CENTER_FREQ2] &&
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2])) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_CENTER_FREQ2],
+ "center frequency 2 can't be used");
return -EINVAL;
+ }
break;
default:
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE],
+ "invalid channel type");
return -EINVAL;
}
- } else if (info->attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
+ } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
chandef->width =
- nla_get_u32(info->attrs[NL80211_ATTR_CHANNEL_WIDTH]);
- if (info->attrs[NL80211_ATTR_CENTER_FREQ1])
+ nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
+ if (attrs[NL80211_ATTR_CENTER_FREQ1])
chandef->center_freq1 =
- nla_get_u32(
- info->attrs[NL80211_ATTR_CENTER_FREQ1]);
- if (info->attrs[NL80211_ATTR_CENTER_FREQ2])
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
+ if (attrs[NL80211_ATTR_CENTER_FREQ2])
chandef->center_freq2 =
- nla_get_u32(
- info->attrs[NL80211_ATTR_CENTER_FREQ2]);
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2]);
}
- if (!cfg80211_chandef_valid(chandef))
+ if (!cfg80211_chandef_valid(chandef)) {
+ NL_SET_ERR_MSG(extack, "invalid channel definition");
return -EINVAL;
+ }
if (!cfg80211_chandef_usable(&rdev->wiphy, chandef,
- IEEE80211_CHAN_DISABLED))
+ IEEE80211_CHAN_DISABLED)) {
+ NL_SET_ERR_MSG(extack, "(extension) channel is disabled");
return -EINVAL;
+ }
if ((chandef->width == NL80211_CHAN_WIDTH_5 ||
chandef->width == NL80211_CHAN_WIDTH_10) &&
- !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ))
+ !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) {
+ NL_SET_ERR_MSG(extack, "5/10 MHz not supported");
return -EINVAL;
+ }
return 0;
}
@@ -2617,8 +2665,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
retry_short = nla_get_u8(
info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]);
- if (retry_short == 0)
- return -EINVAL;
changed |= WIPHY_PARAM_RETRY_SHORT;
}
@@ -2626,8 +2672,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) {
retry_long = nla_get_u8(
info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]);
- if (retry_long == 0)
- return -EINVAL;
changed |= WIPHY_PARAM_RETRY_LONG;
}
@@ -3119,8 +3163,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
if (otype != ntype)
change = true;
- if (ntype > NL80211_IFTYPE_MAX)
- return -EINVAL;
}
if (info->attrs[NL80211_ATTR_MESH_ID]) {
@@ -3185,11 +3227,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL80211_ATTR_IFNAME])
return -EINVAL;
- if (info->attrs[NL80211_ATTR_IFTYPE]) {
+ if (info->attrs[NL80211_ATTR_IFTYPE])
type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
- if (type > NL80211_IFTYPE_MAX)
- return -EINVAL;
- }
if (!rdev->ops->add_virtual_intf ||
!(rdev->wiphy.interface_modes & (1 << type)))
@@ -3252,15 +3291,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
* P2P Device and NAN do not have a netdev, so don't go
* through the netdev notifier and must be added here
*/
- mutex_init(&wdev->mtx);
- INIT_LIST_HEAD(&wdev->event_list);
- spin_lock_init(&wdev->event_lock);
- INIT_LIST_HEAD(&wdev->mgmt_registrations);
- spin_lock_init(&wdev->mgmt_registrations_lock);
-
- wdev->identifier = ++rdev->wdev_id;
- list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
- rdev->devlist_generation++;
+ cfg80211_init_wdev(rdev, wdev);
break;
default:
break;
@@ -3272,15 +3303,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return -ENOBUFS;
}
- /*
- * For wdevs which have no associated netdev object (e.g. of type
- * NL80211_IFTYPE_P2P_DEVICE), emit the NEW_INTERFACE event here.
- * For all other types, the event will be generated from the
- * netdev notifier
- */
- if (!wdev->netdev)
- nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
-
return genlmsg_reply(msg, info);
}
@@ -3359,7 +3381,7 @@ static void get_key_callback(void *c, struct key_params *params)
params->cipher)))
goto nla_put_failure;
- if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
+ if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx))
goto nla_put_failure;
nla_nest_end(cookie->msg, key);
@@ -3386,9 +3408,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_KEY_IDX])
key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
- if (key_idx > 5)
- return -EINVAL;
-
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -3396,8 +3415,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
- if (kt >= NUM_NL80211_KEYTYPES)
- return -EINVAL;
if (kt != NL80211_KEYTYPE_GROUP &&
kt != NL80211_KEYTYPE_PAIRWISE)
return -EINVAL;
@@ -3998,16 +4015,12 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
return 0;
}
-static int nl80211_parse_beacon(struct nlattr *attrs[],
+static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs[],
struct cfg80211_beacon_data *bcn)
{
bool haveinfo = false;
-
- if (!is_valid_ie_attr(attrs[NL80211_ATTR_BEACON_TAIL]) ||
- !is_valid_ie_attr(attrs[NL80211_ATTR_IE]) ||
- !is_valid_ie_attr(attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
- !is_valid_ie_attr(attrs[NL80211_ATTR_IE_ASSOC_RESP]))
- return -EINVAL;
+ int err;
memset(bcn, 0, sizeof(*bcn));
@@ -4052,6 +4065,35 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]);
}
+ if (attrs[NL80211_ATTR_FTM_RESPONDER]) {
+ struct nlattr *tb[NL80211_FTM_RESP_ATTR_MAX + 1];
+
+ err = nla_parse_nested(tb, NL80211_FTM_RESP_ATTR_MAX,
+ attrs[NL80211_ATTR_FTM_RESPONDER],
+ NULL, NULL);
+ if (err)
+ return err;
+
+ if (tb[NL80211_FTM_RESP_ATTR_ENABLED] &&
+ wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER))
+ bcn->ftm_responder = 1;
+ else
+ return -EOPNOTSUPP;
+
+ if (tb[NL80211_FTM_RESP_ATTR_LCI]) {
+ bcn->lci = nla_data(tb[NL80211_FTM_RESP_ATTR_LCI]);
+ bcn->lci_len = nla_len(tb[NL80211_FTM_RESP_ATTR_LCI]);
+ }
+
+ if (tb[NL80211_FTM_RESP_ATTR_CIVICLOC]) {
+ bcn->civicloc = nla_data(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]);
+ bcn->civicloc_len = nla_len(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]);
+ }
+ } else {
+ bcn->ftm_responder = -1;
+ }
+
return 0;
}
@@ -4198,7 +4240,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
!info->attrs[NL80211_ATTR_BEACON_HEAD])
return -EINVAL;
- err = nl80211_parse_beacon(info->attrs, &params.beacon);
+ err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon);
if (err)
return err;
@@ -4228,14 +4270,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
- if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
+ if (info->attrs[NL80211_ATTR_HIDDEN_SSID])
params.hidden_ssid = nla_get_u32(
info->attrs[NL80211_ATTR_HIDDEN_SSID]);
- if (params.hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE &&
- params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_LEN &&
- params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_CONTENTS)
- return -EINVAL;
- }
params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
@@ -4265,8 +4302,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
params.p2p_ctwindow =
nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
- if (params.p2p_ctwindow > 127)
- return -EINVAL;
if (params.p2p_ctwindow != 0 &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
return -EINVAL;
@@ -4278,8 +4313,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
- if (tmp > 1)
- return -EINVAL;
params.p2p_opp_ps = tmp;
if (params.p2p_opp_ps != 0 &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
@@ -4382,7 +4415,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
if (!wdev->beacon_interval)
return -EINVAL;
- err = nl80211_parse_beacon(info->attrs, &params);
+ err = nl80211_parse_beacon(rdev, info->attrs, &params);
if (err)
return err;
@@ -4728,6 +4761,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc);
PUT_SINFO_U64(BEACON_RX, rx_beacon);
PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
+ PUT_SINFO(RX_MPDUS, rx_mpdu_count, u32);
+ PUT_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32);
if (wiphy_ext_feature_isset(&rdev->wiphy,
NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) {
PUT_SINFO(ACK_SIGNAL, ack_signal, u8);
@@ -4811,7 +4846,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err)
goto out_err;
@@ -5216,17 +5251,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
else
params.listen_interval = -1;
- if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
- u8 tmp;
-
- tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
- if (tmp >= NUM_NL80211_P2P_PS_STATUS)
- return -EINVAL;
-
- params.support_p2p_ps = tmp;
- } else {
+ if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS])
+ params.support_p2p_ps =
+ nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
+ else
params.support_p2p_ps = -1;
- }
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -5256,38 +5285,23 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
return -EINVAL;
- if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
+ if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
params.plink_action =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
- if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
- return -EINVAL;
- }
if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) {
params.plink_state =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
- if (params.plink_state >= NUM_NL80211_PLINK_STATES)
- return -EINVAL;
- if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) {
+ if (info->attrs[NL80211_ATTR_MESH_PEER_AID])
params.peer_aid = nla_get_u16(
info->attrs[NL80211_ATTR_MESH_PEER_AID]);
- if (params.peer_aid > IEEE80211_MAX_AID)
- return -EINVAL;
- }
params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE;
}
- if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) {
- enum nl80211_mesh_power_mode pm = nla_get_u32(
+ if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE])
+ params.local_pm = nla_get_u32(
info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]);
- if (pm <= NL80211_MESH_POWER_UNKNOWN ||
- pm > NL80211_MESH_POWER_MAX)
- return -EINVAL;
-
- params.local_pm = pm;
- }
-
if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
params.opmode_notif_used = true;
params.opmode_notif =
@@ -5364,13 +5378,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
- u8 tmp;
-
- tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
- if (tmp >= NUM_NL80211_P2P_PS_STATUS)
- return -EINVAL;
-
- params.support_p2p_ps = tmp;
+ params.support_p2p_ps =
+ nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
} else {
/*
* if not specified, assume it's supported for P2P GO interface,
@@ -5384,8 +5393,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
else
params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
- if (!params.aid || params.aid > IEEE80211_MAX_AID)
- return -EINVAL;
if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
params.capability =
@@ -5425,12 +5432,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
}
- if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
+ if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
params.plink_action =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
- if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
- return -EINVAL;
- }
err = nl80211_parse_sta_channel_info(info, &params);
if (err)
@@ -5662,7 +5666,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err)
goto out_err;
@@ -5858,7 +5862,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err)
goto out_err;
@@ -5940,9 +5944,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
params.p2p_ctwindow =
- nla_get_s8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
- if (params.p2p_ctwindow < 0)
- return -EINVAL;
+ nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
if (params.p2p_ctwindow != 0 &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
return -EINVAL;
@@ -5954,8 +5956,6 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
- if (tmp > 1)
- return -EINVAL;
params.p2p_opp_ps = tmp;
if (params.p2p_opp_ps &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
@@ -6134,33 +6134,49 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
return -ENOBUFS;
}
-static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
- [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
- [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
- [NL80211_MESHCONF_MAX_PEER_LINKS] = { .type = NLA_U16 },
- [NL80211_MESHCONF_MAX_RETRIES] = { .type = NLA_U8 },
- [NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
- [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
- [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
- [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
+static const struct nla_policy
+nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
+ [NL80211_MESHCONF_RETRY_TIMEOUT] =
+ NLA_POLICY_RANGE(NLA_U16, 1, 255),
+ [NL80211_MESHCONF_CONFIRM_TIMEOUT] =
+ NLA_POLICY_RANGE(NLA_U16, 1, 255),
+ [NL80211_MESHCONF_HOLDING_TIMEOUT] =
+ NLA_POLICY_RANGE(NLA_U16, 1, 255),
+ [NL80211_MESHCONF_MAX_PEER_LINKS] =
+ NLA_POLICY_RANGE(NLA_U16, 0, 255),
+ [NL80211_MESHCONF_MAX_RETRIES] = NLA_POLICY_MAX(NLA_U8, 16),
+ [NL80211_MESHCONF_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_MESHCONF_ELEMENT_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] =
+ NLA_POLICY_RANGE(NLA_U32, 1, 255),
[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
- [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = NLA_POLICY_MIN(NLA_U16, 1),
[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
- [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
- [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
- [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
- [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_ROOTMODE] = NLA_POLICY_MAX(NLA_U8, 4),
+ [NL80211_MESHCONF_HWMP_RANN_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_MESHCONF_FORWARDING] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_MESHCONF_RSSI_THRESHOLD] =
+ NLA_POLICY_RANGE(NLA_S32, -255, 0),
[NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 },
[NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 },
- [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_POWER_MODE] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_MESH_POWER_ACTIVE,
+ NL80211_MESH_POWER_MAX),
[NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 },
[NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 },
};
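
Converted entries now carry their valid ranges in the policy itself, which is why the matching open-coded checks disappear from nl80211_parse_mesh_config() further down. A minimal sketch of the macro family, with hypothetical attribute names:

    #include <net/netlink.h>

    enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_TTL, DEMO_ATTR_RSSI, __DEMO_ATTR_MAX };
    #define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

    /* The bounds live in the policy; nla_parse() enforces them, so the
     * handler only ever sees in-range values. */
    static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
            [DEMO_ATTR_TTL]  = NLA_POLICY_RANGE(NLA_U8, 1, 255),
            [DEMO_ATTR_RSSI] = NLA_POLICY_RANGE(NLA_S32, -255, 0),
    };

NLA_POLICY_MIN() and NLA_POLICY_MAX() are the one-sided variants used above.
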
@@ -6173,68 +6189,12 @@ static const struct nla_policy
[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
[NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG },
- [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_MESH_SETUP_IE] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
[NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
};
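
For NL80211_MESH_SETUP_IE the bound is not a numeric range but well-formed information-element framing, so the policy entry attaches a validation callback plus a length cap via NLA_POLICY_VALIDATE_FN(). The callback shape, sketched with a hypothetical name (the real validate_ie_attr presumably walks the IE TLVs in much this way):

    /* Each element is: u8 id, u8 len, then len payload bytes. */
    static int demo_validate_ies(const struct nlattr *attr,
                                 struct netlink_ext_ack *extack)
    {
            const u8 *data = nla_data(attr);
            int len = nla_len(attr);

            while (len >= 2) {
                    int elen = data[1] + 2; /* header + payload */

                    if (elen > len)
                            return -EINVAL;
                    data += elen;
                    len -= elen;
            }

            return len ? -EINVAL : 0;
    }
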
-static int nl80211_check_bool(const struct nlattr *nla, u8 min, u8 max, bool *out)
-{
- u8 val = nla_get_u8(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_u8(const struct nlattr *nla, u8 min, u8 max, u8 *out)
-{
- u8 val = nla_get_u8(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_u16(const struct nlattr *nla, u16 min, u16 max, u16 *out)
-{
- u16 val = nla_get_u16(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_u32(const struct nlattr *nla, u32 min, u32 max, u32 *out)
-{
- u32 val = nla_get_u32(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *out)
-{
- s32 val = nla_get_s32(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_power_mode(const struct nlattr *nla,
- enum nl80211_mesh_power_mode min,
- enum nl80211_mesh_power_mode max,
- enum nl80211_mesh_power_mode *out)
-{
- u32 val = nla_get_u32(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
static int nl80211_parse_mesh_config(struct genl_info *info,
struct mesh_config *cfg,
u32 *mask_out)
@@ -6243,13 +6203,12 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
u32 mask = 0;
u16 ht_opmode;
-#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
-do { \
- if (tb[attr]) { \
- if (fn(tb[attr], min, max, &cfg->param)) \
- return -EINVAL; \
- mask |= (1 << (attr - 1)); \
- } \
+#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, mask, attr, fn) \
+do { \
+ if (tb[attr]) { \
+ cfg->param = fn(tb[attr]); \
+ mask |= BIT((attr) - 1); \
+ } \
} while (0)
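
The simplified macro keeps the long-standing convention that attribute attr is recorded at bit (attr - 1) of the mask (hence the BUILD_BUG_ON against 32 below), so the range checks added later in this hunk must test the same shifted bit. A runnable illustration of the off-by-one this invites:

    #include <assert.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            unsigned int mask = 0;
            int attr = 11;                  /* some MESHCONF_* attribute id */

            mask |= BIT(attr - 1);          /* what the macro records */

            assert(mask & BIT(attr - 1));   /* correct presence test */
            assert(!(mask & BIT(attr)));    /* BIT(attr) tests attr + 1 */
            return 0;
    }
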
if (!info->attrs[NL80211_ATTR_MESH_CONFIG])
@@ -6264,75 +6223,73 @@ do { \
BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32);
/* Fill in the params struct */
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255,
- mask, NL80211_MESHCONF_RETRY_TIMEOUT,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255,
- mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255,
- mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255,
- mask, NL80211_MESHCONF_MAX_PEER_LINKS,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16,
- mask, NL80211_MESHCONF_MAX_RETRIES,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255,
- mask, NL80211_MESHCONF_TTL, nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255,
- mask, NL80211_MESHCONF_ELEMENT_TTL,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1,
- mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
- nl80211_check_bool);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask,
+ NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, mask,
+ NL80211_MESHCONF_CONFIRM_TIMEOUT,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, mask,
+ NL80211_MESHCONF_HOLDING_TIMEOUT,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, mask,
+ NL80211_MESHCONF_MAX_PEER_LINKS,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, mask,
+ NL80211_MESHCONF_MAX_RETRIES, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, mask,
+ NL80211_MESHCONF_TTL, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, mask,
+ NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask,
+ NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
- 1, 255, mask,
+ mask,
NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255,
- mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535,
- mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535,
- mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
- nl80211_check_u16);
+ nla_get_u32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask,
+ NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+ nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, mask,
+ NL80211_MESHCONF_PATH_REFRESH_TIME,
+ nla_get_u32);
+ if (mask & BIT(NL80211_MESHCONF_PATH_REFRESH_TIME - 1) &&
+ (cfg->path_refresh_time < 1 || cfg->path_refresh_time > 65535))
+ return -EINVAL;
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, mask,
+ NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
- 1, 65535, mask,
+ mask,
NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
- 1, 65535, mask,
+ nla_get_u32);
+ if (mask & BIT(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT - 1) &&
+ (cfg->dot11MeshHWMPactivePathTimeout < 1 ||
+ cfg->dot11MeshHWMPactivePathTimeout > 65535))
+ return -EINVAL;
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, mask,
NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
- 1, 65535, mask,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, mask,
NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
- nl80211_check_u16);
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPnetDiameterTraversalTime,
- 1, 65535, mask,
+ dot11MeshHWMPnetDiameterTraversalTime, mask,
NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4,
- mask, NL80211_MESHCONF_HWMP_ROOTMODE,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535,
- mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshGateAnnouncementProtocol, 0, 1,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
+ NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
+ NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol,
mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
- nl80211_check_bool);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1,
- mask, NL80211_MESHCONF_FORWARDING,
- nl80211_check_bool);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
- mask, NL80211_MESHCONF_RSSI_THRESHOLD,
- nl80211_check_s32);
+ nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, mask,
+ NL80211_MESHCONF_FORWARDING, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask,
+ NL80211_MESHCONF_RSSI_THRESHOLD,
+ nla_get_s32);
/*
* Check HT operation mode based on
* IEEE 802.11-2016 9.4.2.57 HT Operation element.
@@ -6351,29 +6308,27 @@ do { \
cfg->ht_opmode = ht_opmode;
mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
}
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
- 1, 65535, mask,
- NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535,
- mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
- nl80211_check_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPconfirmationInterval,
- 1, 65535, mask,
+ dot11MeshHWMPactivePathToRootTimeout, mask,
+ NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+ nla_get_u32);
+ if (mask & BIT(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT - 1) &&
+ (cfg->dot11MeshHWMPactivePathToRootTimeout < 1 ||
+ cfg->dot11MeshHWMPactivePathToRootTimeout > 65535))
+ return -EINVAL;
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, mask,
+ NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval,
+ mask,
NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode,
- NL80211_MESH_POWER_ACTIVE,
- NL80211_MESH_POWER_MAX,
- mask, NL80211_MESHCONF_POWER_MODE,
- nl80211_check_power_mode);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
- 0, 65535, mask,
- NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff,
- mask, NL80211_MESHCONF_PLINK_TIMEOUT,
- nl80211_check_u32);
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, mask,
+ NL80211_MESHCONF_POWER_MODE, nla_get_u32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, mask,
+ NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask,
+ NL80211_MESHCONF_PLINK_TIMEOUT, nla_get_u32);
if (mask_out)
*mask_out = mask;
@@ -6416,8 +6371,6 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
if (tb[NL80211_MESH_SETUP_IE]) {
struct nlattr *ieattr =
tb[NL80211_MESH_SETUP_IE];
- if (!is_valid_ie_attr(ieattr))
- return -EINVAL;
setup->ie = nla_data(ieattr);
setup->ie_len = nla_len(ieattr);
}
@@ -7050,9 +7003,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
int err, tmp, n_ssids = 0, n_channels, i;
size_t ie_len;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
wiphy = &rdev->wiphy;
if (wdev->iftype == NL80211_IFTYPE_NAN)
@@ -7406,9 +7356,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
- if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE]))
- return ERR_PTR(-EINVAL);
-
if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
@@ -7768,7 +7715,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
*/
if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) {
while (!sched_scan_req->reqid)
- sched_scan_req->reqid = rdev->wiphy.cookie_counter++;
+ sched_scan_req->reqid = cfg80211_assign_cookie(rdev);
}
err = rdev_sched_scan_start(rdev, dev, sched_scan_req);
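
Here and in the NAN code below, inline bumps of wiphy.cookie_counter give way to a shared helper. A plausible shape for it, assuming its only job is to hand out nonzero cookies (callers treat 0 as "unset"); the real body lives elsewhere in this patch and may differ:

    static inline u64 cfg80211_assign_cookie(struct cfg80211_registered_device *rdev)
    {
            u64 r = ++rdev->cookie_counter;

            /* 0 means "no cookie"; skip it on wraparound. */
            if (WARN_ON(r == 0))
                    r = ++rdev->cookie_counter;

            return r;
    }
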
@@ -7944,7 +7891,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
if (!need_new_beacon)
goto skip_beacons;
- err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
+ err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_after);
if (err)
return err;
@@ -7954,7 +7901,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- err = nl80211_parse_beacon(csa_attrs, &params.beacon_csa);
+ err = nl80211_parse_beacon(rdev, csa_attrs, &params.beacon_csa);
if (err)
return err;
@@ -8191,7 +8138,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err) {
rtnl_unlock();
return err;
@@ -8312,7 +8259,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
bool radio_stats;
rtnl_lock();
- res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (res)
goto out_err;
@@ -8376,9 +8323,6 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
struct key_parse key;
bool local_state_change;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -8617,9 +8561,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC] ||
!info->attrs[NL80211_ATTR_SSID] ||
!info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -8743,9 +8684,6 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -8794,9 +8732,6 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -8871,9 +8806,6 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
memset(&ibss, 0, sizeof(ibss));
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_SSID] ||
!nla_len(info->attrs[NL80211_ATTR_SSID]))
return -EINVAL;
@@ -9311,9 +9243,6 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
memset(&connect, 0, sizeof(connect));
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_SSID] ||
!nla_len(info->attrs[NL80211_ATTR_SSID]))
return -EINVAL;
@@ -9372,11 +9301,6 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
!wiphy_ext_feature_isset(&rdev->wiphy,
NL80211_EXT_FEATURE_MFP_OPTIONAL))
return -EOPNOTSUPP;
-
- if (connect.mfp != NL80211_MFP_REQUIRED &&
- connect.mfp != NL80211_MFP_NO &&
- connect.mfp != NL80211_MFP_OPTIONAL)
- return -EINVAL;
} else {
connect.mfp = NL80211_MFP_NO;
}
@@ -9549,8 +9473,6 @@ static int nl80211_update_connect_params(struct sk_buff *skb,
return -EOPNOTSUPP;
if (info->attrs[NL80211_ATTR_IE]) {
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
changed |= UPDATE_ASSOC_IES;
@@ -10135,9 +10057,6 @@ static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]);
- if (ps_state != NL80211_PS_DISABLED && ps_state != NL80211_PS_ENABLED)
- return -EINVAL;
-
wdev = dev->ieee80211_ptr;
if (!rdev->ops->set_power_mgmt)
@@ -10700,8 +10619,7 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (!scan_plan)
return -ENOBUFS;
- if (!scan_plan ||
- nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
+ if (nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
req->scan_plans[i].interval) ||
(req->scan_plans[i].iterations &&
nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS,
@@ -11299,9 +11217,6 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION])
new_rule->condition =
nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]);
- if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH &&
- new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH)
- return -EINVAL;
if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN])
return -EINVAL;
@@ -11654,8 +11569,6 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
conf.master_pref =
nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
- if (!conf.master_pref)
- return -EINVAL;
if (info->attrs[NL80211_ATTR_BANDS]) {
u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]);
@@ -11773,7 +11686,7 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
if (!func)
return -ENOMEM;
- func->cookie = wdev->wiphy->cookie_counter++;
+ func->cookie = cfg80211_assign_cookie(rdev);
if (!tb[NL80211_NAN_FUNC_TYPE] ||
nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) {
@@ -12219,8 +12132,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_MDID] ||
- !info->attrs[NL80211_ATTR_IE] ||
- !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+ !info->attrs[NL80211_ATTR_IE])
return -EINVAL;
memset(&ft_params, 0, sizeof(ft_params));
@@ -12640,12 +12552,7 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]);
- if (tsid >= IEEE80211_NUM_TIDS)
- return -EINVAL;
-
up = nla_get_u8(info->attrs[NL80211_ATTR_USER_PRIO]);
- if (up >= IEEE80211_NUM_UPS)
- return -EINVAL;
/* WMM uses TIDs 0-7 even for TSPEC */
if (tsid >= IEEE80211_FIRST_TSPEC_TSID) {
@@ -13003,6 +12910,76 @@ static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_ftm_responder_stats ftm_stats = {};
+ struct sk_buff *msg;
+ void *hdr;
+ struct nlattr *ftm_stats_attr;
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP || !wdev->beacon_interval)
+ return -EOPNOTSUPP;
+
+ err = rdev_get_ftm_responder_stats(rdev, dev, &ftm_stats);
+ if (err)
+ return err;
+
+ if (!ftm_stats.filled)
+ return -ENODATA;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+ NL80211_CMD_GET_FTM_RESPONDER_STATS);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
+
+ ftm_stats_attr = nla_nest_start(msg, NL80211_ATTR_FTM_RESPONDER_STATS);
+ if (!ftm_stats_attr)
+ goto nla_put_failure;
+
+#define SET_FTM(field, name, type) \
+ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \
+ nla_put_ ## type(msg, NL80211_FTM_STATS_ ## name, \
+ ftm_stats.field)) \
+ goto nla_put_failure; } while (0)
+#define SET_FTM_U64(field, name) \
+ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \
+ nla_put_u64_64bit(msg, NL80211_FTM_STATS_ ## name, \
+ ftm_stats.field, NL80211_FTM_STATS_PAD)) \
+ goto nla_put_failure; } while (0)
+
+ SET_FTM(success_num, SUCCESS_NUM, u32);
+ SET_FTM(partial_num, PARTIAL_NUM, u32);
+ SET_FTM(failed_num, FAILED_NUM, u32);
+ SET_FTM(asap_num, ASAP_NUM, u32);
+ SET_FTM(non_asap_num, NON_ASAP_NUM, u32);
+ SET_FTM_U64(total_duration_ms, TOTAL_DURATION_MSEC);
+ SET_FTM(unknown_triggers_num, UNKNOWN_TRIGGERS_NUM, u32);
+ SET_FTM(reschedule_requests_num, RESCHEDULE_REQUESTS_NUM, u32);
+ SET_FTM(out_of_window_triggers_num, OUT_OF_WINDOW_TRIGGERS_NUM, u32);
+#undef SET_FTM
+#undef SET_FTM_U64
+
+ nla_nest_end(msg, ftm_stats_attr);
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
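
The new handler follows the standard generic-netlink reply shape: allocate a message, open a nest, emit only the fields the driver flagged in ftm_stats.filled, close the nest, and send. Condensed template of that flow (DEMO_* identifiers and demo_genl_family hypothetical):

    static int demo_reply(struct genl_info *info, u32 val)
    {
            struct sk_buff *msg;
            struct nlattr *nest;
            void *hdr;

            msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!msg)
                    return -ENOMEM;

            hdr = genlmsg_put_reply(msg, info, &demo_genl_family, 0,
                                    DEMO_CMD_GET_STATS);
            if (!hdr)
                    goto fail;

            nest = nla_nest_start(msg, DEMO_ATTR_STATS);
            if (!nest || nla_put_u32(msg, DEMO_STATS_SUCCESS_NUM, val))
                    goto fail;
            nla_nest_end(msg, nest);

            genlmsg_end(msg, hdr);
            return genlmsg_reply(msg, info);

    fail:
            nlmsg_free(msg);
            return -ENOBUFS;
    }

Note that every failure after nlmsg_new() must free the message, which is why the !hdr case above jumps to the common unwind label rather than returning directly.
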
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -13914,6 +13891,13 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS,
+ .doit = nl80211_get_ftm_responder_stats,
+ .policy = nl80211_policy,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 364f5d67f05b..51380b5c32f2 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1232,4 +1232,19 @@ rdev_external_auth(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_get_ftm_responder_stats(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats);
+ if (rdev->ops->get_ftm_responder_stats)
+ ret = rdev->ops->get_ftm_responder_stats(&rdev->wiphy, dev,
+ ftm_stats);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 148c229fe84f..ecfb1a06dbb2 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3831,6 +3831,15 @@ static int __init regulatory_init_db(void)
{
int err;
+ /*
+ * It's possible that - due to other bugs/issues - cfg80211
+ * never called regulatory_init() below, or that it failed;
+ * in that case, don't try to do any further work here as
+ * it's doomed to lead to crashes.
+ */
+ if (IS_ERR_OR_NULL(reg_pdev))
+ return -EINVAL;
+
err = load_builtin_regdb_keys();
if (err)
return err;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 5e7eec849200..c6a9446b4e6b 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2368,6 +2368,140 @@ TRACE_EVENT(rdev_external_auth,
__entry->bssid, __entry->ssid, __entry->status)
);
+TRACE_EVENT(rdev_start_radar_detection,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms),
+ TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ CHAN_DEF_ENTRY
+ __field(u32, cac_time_ms)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ CHAN_DEF_ASSIGN(chandef);
+ __entry->cac_time_ms = cac_time_ms;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
+ ", cac_time_ms=%u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+ __entry->cac_time_ms)
+);
+
+TRACE_EVENT(rdev_set_mcast_rate,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ int *mcast_rate),
+ TP_ARGS(wiphy, netdev, mcast_rate),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __array(int, mcast_rate, NUM_NL80211_BANDS)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ memcpy(__entry->mcast_rate, mcast_rate,
+ sizeof(int) * NUM_NL80211_BANDS);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", "
+ "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ __entry->mcast_rate[NL80211_BAND_2GHZ],
+ __entry->mcast_rate[NL80211_BAND_5GHZ],
+ __entry->mcast_rate[NL80211_BAND_60GHZ])
+);
+
+TRACE_EVENT(rdev_set_coalesce,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce),
+ TP_ARGS(wiphy, coalesce),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(int, n_rules)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->n_rules = coalesce ? coalesce->n_rules : 0;
+ ),
+ TP_printk(WIPHY_PR_FMT ", n_rules=%d",
+ WIPHY_PR_ARG, __entry->n_rules)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_set_multicast_to_unicast,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const bool enabled),
+ TP_ARGS(wiphy, netdev, enabled),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(bool, enabled)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->enabled = enabled;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ BOOL_TO_STR(__entry->enabled))
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_get_txq_stats,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_get_ftm_responder_stats,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_ftm_responder_stats *ftm_stats),
+
+ TP_ARGS(wiphy, netdev, ftm_stats),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(u32, success_num)
+ __field(u32, partial_num)
+ __field(u32, failed_num)
+ __field(u32, asap_num)
+ __field(u32, non_asap_num)
+ __field(u64, duration)
+ __field(u32, unknown_triggers)
+ __field(u32, reschedule)
+ __field(u32, out_of_window)
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->success_num = ftm_stats->success_num;
+ __entry->partial_num = ftm_stats->partial_num;
+ __entry->failed_num = ftm_stats->failed_num;
+ __entry->asap_num = ftm_stats->asap_num;
+ __entry->non_asap_num = ftm_stats->non_asap_num;
+ __entry->duration = ftm_stats->total_duration_ms;
+ __entry->unknown_triggers = ftm_stats->unknown_triggers_num;
+ __entry->reschedule = ftm_stats->reschedule_requests_num;
+ __entry->out_of_window = ftm_stats->out_of_window_triggers_num;
+ ),
+
+ TP_printk(WIPHY_PR_FMT ", Ftm responder stats: success %u, partial %u, "
+ "failed %u, asap %u, non asap %u, total duration %llu, unknown "
+ "triggers %u, rescheduled %u, out of window %u", WIPHY_PR_ARG,
+ __entry->success_num, __entry->partial_num, __entry->failed_num,
+ __entry->asap_num, __entry->non_asap_num, __entry->duration,
+ __entry->unknown_triggers, __entry->reschedule,
+ __entry->out_of_window)
+);
+
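
For readers unfamiliar with the tracepoint macros being moved here: TP_PROTO/TP_ARGS declare the call signature, TP_STRUCT__entry lays out the ring-buffer record, TP_fast_assign fills it at trace time, and TP_printk formats it lazily when the trace is read. A minimal hypothetical event:

    TRACE_EVENT(demo_event,
            TP_PROTO(struct net_device *netdev, u32 val),
            TP_ARGS(netdev, val),
            TP_STRUCT__entry(
                    __string(name, netdev->name)
                    __field(u32, val)
            ),
            TP_fast_assign(
                    __assign_str(name, netdev->name);
                    __entry->val = val;
            ),
            TP_printk("%s: val=%u", __get_str(name), __entry->val)
    );
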
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -3160,105 +3294,6 @@ TRACE_EVENT(cfg80211_stop_iface,
TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT,
WIPHY_PR_ARG, WDEV_PR_ARG)
);
-
-TRACE_EVENT(rdev_start_radar_detection,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_chan_def *chandef,
- u32 cac_time_ms),
- TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- CHAN_DEF_ENTRY
- __field(u32, cac_time_ms)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- CHAN_DEF_ASSIGN(chandef);
- __entry->cac_time_ms = cac_time_ms;
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", cac_time_ms=%u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->cac_time_ms)
-);
-
-TRACE_EVENT(rdev_set_mcast_rate,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- int *mcast_rate),
- TP_ARGS(wiphy, netdev, mcast_rate),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __array(int, mcast_rate, NUM_NL80211_BANDS)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- memcpy(__entry->mcast_rate, mcast_rate,
- sizeof(int) * NUM_NL80211_BANDS);
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", "
- "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]",
- WIPHY_PR_ARG, NETDEV_PR_ARG,
- __entry->mcast_rate[NL80211_BAND_2GHZ],
- __entry->mcast_rate[NL80211_BAND_5GHZ],
- __entry->mcast_rate[NL80211_BAND_60GHZ])
-);
-
-TRACE_EVENT(rdev_set_coalesce,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce),
- TP_ARGS(wiphy, coalesce),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- __field(int, n_rules)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- __entry->n_rules = coalesce ? coalesce->n_rules : 0;
- ),
- TP_printk(WIPHY_PR_FMT ", n_rules=%d",
- WIPHY_PR_ARG, __entry->n_rules)
-);
-
-DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
- TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
- TP_ARGS(wiphy, wdev)
-);
-
-TRACE_EVENT(rdev_set_multicast_to_unicast,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- const bool enabled),
- TP_ARGS(wiphy, netdev, enabled),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __field(bool, enabled)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- __entry->enabled = enabled;
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s",
- WIPHY_PR_ARG, NETDEV_PR_ARG,
- BOOL_TO_STR(__entry->enabled))
-);
-
-TRACE_EVENT(rdev_get_txq_stats,
- TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
- TP_ARGS(wiphy, wdev),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- WDEV_ENTRY
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- WDEV_ASSIGN;
- ),
- TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
-);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/samples/Kconfig b/samples/Kconfig
index bd133efc1a56..ad1ec7016d4c 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -1,5 +1,6 @@
menuconfig SAMPLES
bool "Sample kernel code"
+ depends on !UML
help
You can build and test sample kernel code here.
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 5a2d1c9578a0..54da4b070db3 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -219,7 +219,7 @@ else
sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
"$(if $(CONFIG_64BIT),64,32)" \
- "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
+ "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \
"$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \
"$(if $(part-of-module),1,0)" "$(@)";
recordmcount_source := $(srctree)/scripts/recordmcount.pl
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index d78aed86af09..8ff8cb1a11f4 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -234,6 +234,7 @@ int main(int argc, char *argv[])
break;
default:
+ error = HV_E_FAIL;
syslog(LOG_ERR, "Unknown operation: %d",
buffer.hdr.operation);
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index efcaf6cac2eb..e46f51b17513 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -204,14 +204,23 @@ from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
PQexec = libpq.PQexec
PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index f827bf77e9d2..e4bb82c8aba9 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -440,7 +440,11 @@ def branch_type_table(*x):
def sample_table(*x):
if branches:
- bind_exec(sample_query, 18, x)
+ for xx in x[0:15]:
+ sample_query.addBindValue(str(xx))
+ for xx in x[19:22]:
+ sample_query.addBindValue(str(xx))
+ do_query_(sample_query)
else:
bind_exec(sample_query, 22, x)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c4acd2001db0..111ae858cbcb 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2286,7 +2286,8 @@ static int append_inlines(struct callchain_cursor *cursor,
if (!symbol_conf.inline_name || !map || !sym)
return ret;
- addr = map__rip_2objdump(map, ip);
+ addr = map__map_ip(map, ip);
+ addr = map__rip_2objdump(map, addr);
inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
if (!inline_node) {
@@ -2312,7 +2313,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
const char *srcline = NULL;
- u64 addr;
+ u64 addr = entry->ip;
if (symbol_conf.hide_unresolved && entry->sym == NULL)
return 0;
@@ -2324,7 +2325,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
* Convert entry->ip from a virtual address to an offset in
* its corresponding binary.
*/
- addr = map__map_ip(entry->map, entry->ip);
+ if (entry->map)
+ addr = map__map_ip(entry->map, entry->ip);
srcline = callchain_srcline(entry->map, entry->sym, addr);
return callchain_cursor_append(cursor, entry->ip,
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 97efbcad076e..1942f6dd24f6 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -35,7 +35,7 @@ class install_lib(_install_lib):
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
if cc != "clang":
cflags += ['-Wno-cast-function-type' ]
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index 0150bb2741eb..0150bb2741eb 100644..100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 0e73698b2048..85d253546684 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -251,7 +251,7 @@ lldpad_app_wait_set()
{
local dev=$1; shift
- while lldptool -t -i $dev -V APP -c app | grep -q pending; do
+ while lldptool -t -i $dev -V APP -c app | grep -Eq "pending|unknown"; do
echo "$dev: waiting for lldpad to push pending APP updates"
sleep 5
done
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index 3a4042d918f0..f34672796044 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -11,10 +11,10 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)"
setup() {
ip netns add "${NETNS}"
ip -netns "${NETNS}" link set lo up
- ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 &> /dev/null
- ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 &> /dev/null
- ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 &> /dev/null
- ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 &> /dev/null
+ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
}
cleanup() {
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index b9cdb68df4c5..a369d616b390 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -142,6 +142,7 @@ dummy6_mask="64"
cleanup_done=1
err_buf=
+tcpdump_pids=
err() {
err_buf="${err_buf}${1}
@@ -284,7 +285,24 @@ setup() {
done
}
+trace() {
+ [ $tracing -eq 0 ] && return
+
+ for arg do
+ [ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue
+ ${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
+ tcpdump_pids="${tcpdump_pids} $!"
+ ns_cmd=
+ done
+ sleep 1
+}
+
cleanup() {
+ for pid in ${tcpdump_pids}; do
+ kill ${pid}
+ done
+ tcpdump_pids=
+
[ ${cleanup_done} -eq 1 ] && return
for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
ip netns del ${n} 2> /dev/null
@@ -357,6 +375,10 @@ test_pmtu_ipvX() {
family=${1}
setup namespaces routing || return 2
+ trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \
+ "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \
+ "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2
if [ ${family} -eq 4 ]; then
ping=ping
@@ -445,6 +467,8 @@ test_pmtu_ipv6_exception() {
test_pmtu_vti4_exception() {
setup namespaces veth vti4 xfrm4 || return 2
+ trace "${ns_a}" veth_a "${ns_b}" veth_b \
+ "${ns_a}" vti4_a "${ns_b}" vti4_b
veth_mtu=1500
vti_mtu=$((veth_mtu - 20))
@@ -473,6 +497,8 @@ test_pmtu_vti4_exception() {
test_pmtu_vti6_exception() {
setup namespaces veth vti6 xfrm6 || return 2
+ trace "${ns_a}" veth_a "${ns_b}" veth_b \
+ "${ns_a}" vti6_a "${ns_b}" vti6_b
fail=0
# Create route exception by exceeding link layer MTU
@@ -641,15 +667,56 @@ test_pmtu_vti6_link_change_mtu() {
return ${fail}
}
-trap cleanup EXIT
+usage() {
+ echo
+ echo "$0 [OPTIONS] [TEST]..."
+ echo "If no TEST argument is given, all tests will be run."
+ echo
+ echo "Options"
+ echo " --trace: capture traffic to TEST_INTERFACE.pcap"
+ echo
+ echo "Available tests${tests}"
+ exit 1
+}
exitcode=0
desc=0
IFS="
"
+
+tracing=0
+for arg do
+ if [ "${arg}" != "${arg#--*}" ]; then
+ opt="${arg#--}"
+ if [ "${opt}" = "trace" ]; then
+ if which tcpdump > /dev/null 2>&1; then
+ tracing=1
+ else
+ echo "=== tcpdump not available, tracing disabled"
+ fi
+ else
+ usage
+ fi
+ else
+ # Check first that all requested tests are available before
+ # running any
+ command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
+ fi
+done
+
+trap cleanup EXIT
+
for t in ${tests}; do
[ $desc -eq 0 ] && name="${t}" && desc=1 && continue || desc=0
+ run_this=1
+ for arg do
+ [ "${arg}" != "${arg#--*}" ] && continue
+ [ "${arg}" = "${name}" ] && run_this=1 && break
+ run_this=0
+ done
+ [ $run_this -eq 0 ] && continue
+
(
unset IFS
eval test_${name}
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 08c341b49760..e101af52d1d6 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# This test checks rtnetlink callpaths and tries to get as much coverage as possible.
#
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 850767befa47..99e537ab5ad9 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Run a series of udpgso benchmarks