-rw-r--r--Documentation/ABI/testing/sysfs-bus-mdio20
-rw-r--r--Documentation/ABI/testing/sysfs-tty3
-rw-r--r--Documentation/devicetree/bindings/mmc/atmel-hsmci.txt5
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt6
-rw-r--r--Documentation/devicetree/bindings/net/arc_emac.txt11
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt121
-rw-r--r--Documentation/devicetree/bindings/net/can/sja1000.txt4
-rw-r--r--Documentation/devicetree/bindings/net/cavium-mix.txt7
-rw-r--r--Documentation/devicetree/bindings/net/cavium-pip.txt7
-rw-r--r--Documentation/devicetree/bindings/net/cdns-emac.txt6
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt5
-rw-r--r--Documentation/devicetree/bindings/net/davicom-dm9000.txt2
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt3
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt25
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt5
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt13
-rw-r--r--Documentation/devicetree/bindings/net/lpc-eth.txt5
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt6
-rw-r--r--Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt6
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-net.txt4
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ks8851.txt3
-rw-r--r--Documentation/devicetree/bindings/net/sh_eth.txt55
-rw-r--r--Documentation/devicetree/bindings/net/smsc-lan91c111.txt3
-rw-r--r--Documentation/devicetree/bindings/net/smsc911x.txt5
-rw-r--r--Documentation/devicetree/bindings/net/sti-dwmac.txt58
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt7
-rw-r--r--Documentation/devicetree/bindings/power/bq2415x.txt47
-rw-r--r--Documentation/devicetree/bindings/spi/spi_atmel.txt5
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt6
-rw-r--r--Documentation/i2c/instantiating-devices41
-rw-r--r--Documentation/networking/3c505.txt45
-rw-r--r--Documentation/networking/bonding.txt96
-rw-r--r--Documentation/networking/gianfar.txt30
-rw-r--r--Documentation/networking/phy.txt9
-rw-r--r--Documentation/networking/tcp.txt2
-rw-r--r--Documentation/phy.txt26
-rw-r--r--Documentation/spi/spi-summary17
-rw-r--r--Documentation/zh_CN/arm64/booting.txt65
-rw-r--r--Documentation/zh_CN/arm64/memory.txt46
-rw-r--r--Documentation/zh_CN/arm64/tagged-pointers.txt52
-rw-r--r--MAINTAINERS21
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts229
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-hummingboard.dts10
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi10
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi1
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/asm/cacheflush.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h15
-rw-r--r--arch/arm/include/asm/spinlock.h15
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/mach-hisi/Kconfig2
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c3
-rw-r--r--arch/arm/mach-imx/clk-imx6sl.c3
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c2
-rw-r--r--arch/arm/mach-moxart/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-pxa/am300epd.c1
-rw-r--r--arch/arm/mach-pxa/include/mach/balloon3.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/corgi.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/csb726.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/gumstix.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/idp.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/palmld.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmt5.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtc.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtx.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm027.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm990_baseboard.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/poodle.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/spitz.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/tosa.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/trizeps4.h2
-rw-r--r--arch/arm/mach-shmobile/Kconfig2
-rw-r--r--arch/arm/mach-zynq/common.c14
-rw-r--r--arch/arm/mm/mm.h1
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/proc-v6.S3
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h9
-rw-r--r--arch/avr32/Makefile2
-rw-r--r--arch/avr32/boards/mimc200/fram.c1
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/io.h2
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h1
-rw-r--r--arch/powerpc/include/asm/eeh.h21
-rw-r--r--arch/powerpc/include/asm/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/iommu.h1
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h26
-rw-r--r--arch/powerpc/include/asm/pgtable.h22
-rw-r--r--arch/powerpc/include/asm/sections.h12
-rw-r--r--arch/powerpc/include/asm/vdso.h6
-rw-r--r--arch/powerpc/kernel/dma.c10
-rw-r--r--arch/powerpc/kernel/eeh.c32
-rw-r--r--arch/powerpc/kernel/eeh_driver.c8
-rw-r--r--arch/powerpc/kernel/iommu.c12
-rw-r--r--arch/powerpc/kernel/irq.c5
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c6
-rw-r--r--arch/powerpc/kernel/misc_32.S5
-rw-r--r--arch/powerpc/kernel/reloc_64.S4
-rw-r--r--arch/powerpc/kernel/setup_32.c5
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32_wrapper.S2
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64_wrapper.S2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c14
-rw-r--r--arch/powerpc/mm/pgtable_64.c12
-rw-r--r--arch/powerpc/mm/subpage-prot.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c5
-rw-r--r--arch/powerpc/perf/power8-pmu.c144
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c32
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c84
-rw-r--r--arch/powerpc/platforms/powernv/pci.c10
-rw-r--r--arch/powerpc/platforms/powernv/pci.h6
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h8
-rw-r--r--arch/powerpc/platforms/powernv/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci.c22
-rw-r--r--arch/powerpc/platforms/pseries/setup.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c38
-rw-r--r--arch/powerpc/xmon/xmon.c24
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/ftrace.c83
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/mm/fault.c14
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c2
-rw-r--r--arch/x86/platform/efi/efi.c5
-rw-r--r--arch/x86/platform/efi/efi_32.c6
-rw-r--r--arch/x86/platform/efi/efi_64.c9
-rw-r--r--block/blk-core.c20
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c101
-rw-r--r--block/blk-lib.c8
-rw-r--r--block/blk-merge.c91
-rw-r--r--block/blk-mq-tag.c2
-rw-r--r--block/blk-mq.c143
-rw-r--r--block/blk-mq.h4
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk-timeout.c2
-rw-r--r--block/blk.h2
-rw-r--r--drivers/acpi/container.c5
-rw-r--r--drivers/acpi/dock.c5
-rw-r--r--drivers/ata/sata_mv.c8
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/nicstar.c8
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/base/component.c8
-rw-r--r--drivers/base/dma-buf.c25
-rw-r--r--drivers/block/null_blk.c97
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkback/blkback.c66
-rw-r--r--drivers/block/xen-blkback/common.h5
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c11
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/clocksource/bcm_kona_timer.c54
-rw-r--r--drivers/cpufreq/intel_pstate.c9
-rw-r--r--drivers/crypto/nx/nx-842.c29
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/mv_xor.c24
-rw-r--r--drivers/edac/edac_mc.c13
-rw-r--r--drivers/edac/edac_mc_sysfs.c10
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c1
-rw-r--r--drivers/gpio/gpio-intel-mid.c4
-rw-r--r--drivers/gpio/gpio-xtensa.c16
-rw-r--r--drivers/gpu/drm/drm_ioctl.c12
-rw-r--r--drivers/gpu/drm/exynos/Kconfig4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c66
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c19
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c14
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c31
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c9
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c179
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/nouveau/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c15
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c32
-rw-r--r--drivers/gpu/drm/radeon/btcd.h4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c19
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h122
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h11
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c16
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-hyperv.c11
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-microsoft.c4
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/connection.c13
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c33
-rw-r--r--drivers/iio/accel/bma180.c16
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/imu/adis16400.h1
-rw-r--r--drivers/iio/imu/adis16400_core.c10
-rw-r--r--drivers/iio/light/tsl2563.c16
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/mag3110.c8
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c185
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c22
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c18
-rw-r--r--drivers/infiniband/hw/mlx5/user.h7
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/bset.c7
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid5.c90
-rw-r--r--drivers/misc/genwqe/card_dev.c1
-rw-r--r--drivers/misc/mei/client.c11
-rw-r--r--drivers/misc/mic/host/mic_virtio.c3
-rw-r--r--drivers/mmc/card/queue.c2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/bonding/bond_3ad.c44
-rw-r--r--drivers/net/bonding/bond_3ad.h176
-rw-r--r--drivers/net/bonding/bond_alb.c134
-rw-r--r--drivers/net/bonding/bond_debugfs.c10
-rw-r--r--drivers/net/bonding/bond_main.c318
-rw-r--r--drivers/net/bonding/bond_netlink.c6
-rw-r--r--drivers/net/bonding/bond_options.c126
-rw-r--r--drivers/net/bonding/bond_procfs.c8
-rw-r--r--drivers/net/bonding/bond_sysfs.c8
-rw-r--r--drivers/net/bonding/bonding.h26
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/caif/caif_spi.c1
-rw-r--r--drivers/net/can/at91_can.c9
-rw-r--r--drivers/net/can/dev.c24
-rw-r--r--drivers/net/can/sja1000/Kconfig13
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/sja1000.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c220
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c194
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/dummy.c8
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1127
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c124
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c164
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c155
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c253
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c127
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h22
-rw-r--r--drivers/net/ethernet/broadcom/genet/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2595
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h630
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c464
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c111
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c35
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c22
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h38
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c202
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c500
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h44
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c64
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c340
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c102
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h47
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c242
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c27
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c32
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c16
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c40
-rw-r--r--drivers/net/ethernet/neterion/s2io.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c41
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c18
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c16
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c73
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h61
-rw-r--r--drivers/net/ethernet/sfc/efx.c26
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c21
-rw-r--r--drivers/net/ethernet/sfc/falcon.c4
-rw-r--r--drivers/net/ethernet/sfc/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h1
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c92
-rw-r--r--drivers/net/ethernet/sfc/tx.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c330
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/sun/niu.c11
-rw-r--r--drivers/net/ethernet/ti/cpsw.c21
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c13
-rw-r--r--drivers/net/hyperv/hyperv_net.h61
-rw-r--r--drivers/net/hyperv/netvsc.c87
-rw-r--r--drivers/net/hyperv/netvsc_drv.c57
-rw-r--r--drivers/net/hyperv/rndis_filter.c41
-rw-r--r--drivers/net/ieee802154/at86rf230.c433
-rw-r--r--drivers/net/irda/irtty-sir.c1
-rw-r--r--drivers/net/loopback.c11
-rw-r--r--drivers/net/macvlan.c14
-rw-r--r--drivers/net/nlmon.c11
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/bcm7xxx.c343
-rw-r--r--drivers/net/phy/broadcom.c52
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/mdio_bus.c20
-rw-r--r--drivers/net/phy/phy.c51
-rw-r--r--drivers/net/phy/phy_device.c57
-rw-r--r--drivers/net/team/team.c10
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix_devices.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c4
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/mcs7830.c5
-rw-r--r--drivers/net/usb/net1080.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c9
-rw-r--r--drivers/net/usb/r8152.c711
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/usb/sr9800.c6
-rw-r--r--drivers/net/usb/usbnet.c25
-rw-r--r--drivers/net/veth.c19
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c110
-rw-r--r--drivers/net/vxlan.c10
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c235
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c176
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c1495
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h248
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c266
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c72
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c64
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h10
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c32
-rw-r--r--drivers/net/wireless/b43/main.h35
-rw-r--r--drivers/net/wireless/b43/xmit.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c1029
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c536
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c972
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h231
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h89
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c83
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h3
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c22
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c23
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c138
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c112
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c285
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h128
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c67
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c76
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c498
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h168
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c34
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c257
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c385
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c29
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c15
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c203
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h62
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c23
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c99
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c82
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c267
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c5
-rw-r--r--drivers/net/wireless/libertas/cfg.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c90
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h2
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c192
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h2
-rw-r--r--drivers/net/wireless/mwifiex/11n.c51
-rw-r--r--drivers/net/wireless/mwifiex/11n.h44
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c27
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c184
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c203
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c54
-rw-r--r--drivers/net/wireless/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/mwifiex/fw.h181
-rw-r--r--drivers/net/wireless/mwifiex/init.c5
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h22
-rw-r--r--drivers/net/wireless/mwifiex/join.c14
-rw-r--r--drivers/net/wireless/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.h90
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c138
-rw-r--r--drivers/net/wireless/mwifiex/scan.c598
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c373
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c112
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c12
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c40
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c13
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c3
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c1044
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c118
-rw-r--r--drivers/net/wireless/mwifiex/util.c114
-rw-r--r--drivers/net/wireless/mwifiex/util.h20
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c96
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h18
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c14
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h10
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h10
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c67
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h53
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c85
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c24
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c192
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c45
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h27
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h86
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/base.c88
-rw-r--r--drivers/of/of_mdio.c22
-rw-r--r--drivers/of/of_net.c26
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c15
-rw-r--r--drivers/phy/phy-core.c62
-rw-r--r--drivers/power/ds2782_battery.c2
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17040_battery.c5
-rw-r--r--drivers/regulator/da9055-regulator.c4
-rw-r--r--drivers/regulator/max14577.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-nuc900.c2
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/android/ashmem.c45
-rw-r--r--drivers/staging/android/ion/compat_ion.c26
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_priv.h1
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c6
-rw-r--r--drivers/staging/android/sw_sync.h17
-rw-r--r--drivers/staging/android/sync.c14
-rw-r--r--drivers/staging/bcm/Bcmnet.c2
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c17
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c6
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c330
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c3
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c13
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c6
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c55
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c22
-rw-r--r--drivers/staging/lustre/TODO5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h3
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/netlogic/xlr_net.c7
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c273
-rw-r--r--drivers/staging/ozwpan/ozproto.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c12
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c2
-rw-r--r--drivers/staging/rtl8821ae/Kconfig2
-rw-r--r--drivers/staging/rtl8821ae/rc.c1
-rw-r--r--drivers/staging/rtl8821ae/wifi.h2
-rw-r--r--drivers/staging/usbip/userspace/libsrc/names.c8
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_pr.c11
-rw-r--r--drivers/target/target_core_sbc.c8
-rw-r--r--drivers/target/target_core_spc.c4
-rw-r--r--drivers/target/target_core_transport.c5
-rw-r--r--drivers/tty/hvc/hvc_opal.c8
-rw-r--r--drivers/tty/hvc/hvc_rtas.c12
-rw-r--r--drivers/tty/hvc/hvc_udbg.c9
-rw-r--r--drivers/tty/hvc/hvc_xen.c17
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/n_tty.c14
-rw-r--r--drivers/tty/serial/8250/8250_core.c18
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c3
-rw-r--r--drivers/tty/serial/omap-serial.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c4
-rw-r--r--drivers/tty/tty_io.c25
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/core/driver.c24
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c7
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/hcd.c11
-rw-r--r--drivers/usb/dwc2/platform.c3
-rw-r--r--drivers/usb/host/xhci-dbg.c6
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-ring.c68
-rw-r--r--drivers/usb/host/xhci.c38
-rw-r--r--drivers/usb/host/xhci.h41
-rw-r--r--drivers/usb/phy/phy.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c3
-rw-r--r--drivers/usb/storage/Kconfig4
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vhost/net.c47
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/exynos/Kconfig3
-rw-r--r--drivers/video/omap2/dss/dispc.c16
-rw-r--r--drivers/video/omap2/dss/dpi.c2
-rw-r--r--drivers/video/omap2/dss/sdi.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c4
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/xencomm.c219
-rw-r--r--fs/bio-integrity.c13
-rw-r--r--fs/bio.c15
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/ioctl.c16
-rw-r--r--fs/btrfs/send.c10
-rw-r--r--fs/btrfs/super.c11
-rw-r--r--fs/btrfs/sysfs.c10
-rw-r--r--fs/ceph/acl.c11
-rw-r--r--fs/ceph/dir.c23
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/ceph/super.c32
-rw-r--r--fs/ceph/super.h7
-rw-r--r--fs/ceph/xattr.c54
-rw-r--r--fs/cifs/cifsacl.c33
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/cifsproto.h6
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/file.c39
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/smb1ops.c1
-rw-r--r--fs/cifs/smb2glob.h3
-rw-r--r--fs/cifs/smb2ops.c14
-rw-r--r--fs/cifs/smb2pdu.c4
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/extents.c1
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/resize.c34
-rw-r--r--fs/ext4/super.c20
-rw-r--r--fs/fscache/object-list.c5
-rw-r--r--fs/fscache/object.c3
-rw-r--r--fs/jbd2/transaction.c6
-rw-r--r--fs/jfs/acl.c2
-rw-r--r--fs/lockd/svclock.c8
-rw-r--r--fs/nfsd/nfs4acl.c9
-rw-r--r--fs/reiserfs/do_balan.c895
-rw-r--r--include/asm-generic/pgtable.h39
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_crtc.h3
-rw-r--r--include/drm/ttm/ttm_page_alloc.h2
-rw-r--r--include/linux/bio.h12
-rw-r--r--include/linux/blk-mq.h9
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/brcmphy.h60
-rw-r--r--include/linux/can/dev.h2
-rw-r--r--include/linux/ceph/ceph_fs.h5
-rw-r--r--include/linux/compiler-gcc4.h6
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/ethtool.h3
-rw-r--r--include/linux/gpio/consumer.h4
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/ieee80211.h199
-rw-r--r--include/linux/interrupt.h5
-rw-r--r--include/linux/mlx5/driver.h3
-rw-r--r--include/linux/netdevice.h58
-rw-r--r--include/linux/nl802154.h12
-rw-r--r--include/linux/of.h153
-rw-r--r--include/linux/of_device.h4
-rw-r--r--include/linux/phy.h56
-rw-r--r--include/linux/phy/phy.h14
-rw-r--r--include/linux/skbuff.h17
-rw-r--r--include/linux/spi/spi.h7
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/net/act_api.h22
-rw-r--r--include/net/cfg80211.h43
-rw-r--r--include/net/ieee80211_radiotap.h4
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/mac80211.h52
-rw-r--r--include/net/mac802154.h35
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/sctp/structs.h14
-rw-r--r--include/net/tc_act/tc_csum.h4
-rw-r--r--include/net/tc_act/tc_defact.h4
-rw-r--r--include/net/tc_act/tc_gact.h4
-rw-r--r--include/net/tc_act/tc_ipt.h4
-rw-r--r--include/net/tc_act/tc_mirred.h4
-rw-r--r--include/net/tc_act/tc_nat.h4
-rw-r--r--include/net/tc_act/tc_pedit.h4
-rw-r--r--include/net/tc_act/tc_skbedit.h4
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/wpan-phy.h19
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/power.h7
-rw-r--r--include/uapi/drm/drm.h2
-rw-r--r--include/uapi/drm/vmwgfx_drm.h1
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/can.h32
-rw-r--r--include/uapi/linux/ethtool.h439
-rw-r--r--include/uapi/linux/mic_ioctl.h2
-rw-r--r--include/uapi/linux/nl80211.h52
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/xen/Kbuild2
-rw-r--r--include/uapi/xen/gntalloc.h (renamed from include/xen/gntalloc.h)0
-rw-r--r--include/uapi/xen/gntdev.h (renamed from include/xen/gntdev.h)0
-rw-r--r--include/xen/interface/io/blkif.h34
-rw-r--r--include/xen/interface/xencomm.h41
-rw-r--r--include/xen/xencomm.h77
-rw-r--r--kernel/irq/devres.c45
-rw-r--r--kernel/irq/irqdesc.c1
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/time/jiffies.c6
-rw-r--r--kernel/time/tick-broadcast.c1
-rw-r--r--kernel/trace/ring_buffer.c7
-rw-r--r--lib/percpu_ida.c7
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/mprotect.c25
-rw-r--r--net/8021q/vlan_dev.c11
-rw-r--r--net/appletalk/aarp.c4
-rw-r--r--net/appletalk/ddp.c100
-rw-r--r--net/batman-adv/bat_iv_ogm.c30
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/hard-interface.c22
-rw-r--r--net/batman-adv/originator.c36
-rw-r--r--net/batman-adv/originator.h4
-rw-r--r--net/batman-adv/routing.c4
-rw-r--r--net/batman-adv/send.c9
-rw-r--r--net/batman-adv/translation-table.c23
-rw-r--r--net/bluetooth/hidp/core.c16
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bridge/br_device.c9
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/dev.c22
-rw-r--r--net/core/flow_dissector.c20
-rw-r--r--net/core/net-sysfs.c9
-rw-r--r--net/core/request_sock.c1
-rw-r--r--net/core/rtnetlink.c126
-rw-r--r--net/dccp/ccids/lib/tfrc.c2
-rw-r--r--net/dccp/ccids/lib/tfrc.h1
-rw-r--r--net/hsr/hsr_device.c10
-rw-r--r--net/hsr/hsr_framereg.c20
-rw-r--r--net/hsr/hsr_main.c4
-rw-r--r--net/ieee802154/ieee802154.h1
-rw-r--r--net/ieee802154/netlink.c1
-rw-r--r--net/ieee802154/nl-phy.c200
-rw-r--r--net/ieee802154/nl_policy.c10
-rw-r--r--net/ieee802154/wpan-class.c10
-rw-r--r--net/ipv4/ip_forward.c78
-rw-r--r--net/ipv4/ip_sockglue.c19
-rw-r--r--net/ipv4/ip_tunnel.c25
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c14
-rw-r--r--net/ipv4/tcp.c5
-rw-r--r--net/ipv4/tcp_cong.c10
-rw-r--r--net/ipv4/tcp_highspeed.c1
-rw-r--r--net/ipv4/tcp_hybla.c1
-rw-r--r--net/ipv4/tcp_illinois.c1
-rw-r--r--net/ipv4/tcp_lp.c1
-rw-r--r--net/ipv4/tcp_output.c18
-rw-r--r--net/ipv4/tcp_scalable.c1
-rw-r--r--net/ipv4/tcp_vegas.c1
-rw-r--r--net/ipv4/tcp_westwood.c1
-rw-r--r--net/ipv4/tcp_yeah.c1
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/addrlabel.c59
-rw-r--r--net/ipv6/ip6_flowlabel.c6
-rw-r--r--net/ipv6/ip6_gre.c9
-rw-r--r--net/ipv6/ip6_output.c17
-rw-r--r--net/ipv6/ip6_tunnel.c9
-rw-r--r--net/ipv6/ip6_vti.c8
-rw-r--r--net/ipv6/sit.c18
-rw-r--r--net/ipx/af_ipx.c28
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/agg-tx.c2
-rw-r--r--net/mac80211/cfg.c198
-rw-r--r--net/mac80211/cfg.h2
-rw-r--r--net/mac80211/chan.c2
-rw-r--r--net/mac80211/debugfs_sta.c2
-rw-r--r--net/mac80211/ht.c2
-rw-r--r--net/mac80211/ibss.c28
-rw-r--r--net/mac80211/ieee80211_i.h14
-rw-r--r--net/mac80211/iface.c8
-rw-r--r--net/mac80211/main.c9
-rw-r--r--net/mac80211/mesh.c96
-rw-r--r--net/mac80211/mlme.c41
-rw-r--r--net/mac80211/rate.c46
-rw-r--r--net/mac80211/rate.h2
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.h2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c7
-rw-r--r--net/mac80211/rc80211_pid_algo.c2
-rw-r--r--net/mac80211/rx.c101
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/status.c3
-rw-r--r--net/mac80211/tx.c33
-rw-r--r--net/mac80211/util.c42
-rw-r--r--net/mac80211/vht.c26
-rw-r--r--net/mac80211/wpa.c9
-rw-r--r--net/mac802154/ieee802154_dev.c67
-rw-r--r--net/mac802154/mib.c4
-rw-r--r--net/mac802154/rx.c1
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/datapath.c20
-rw-r--r--net/openvswitch/datapath.h2
-rw-r--r--net/openvswitch/vport.c10
-rw-r--r--net/packet/af_packet.c26
-rw-r--r--net/rfkill/core.c9
-rw-r--r--net/sched/act_api.c142
-rw-r--r--net/sched/act_csum.c31
-rw-r--r--net/sched/act_gact.c34
-rw-r--r--net/sched/act_ipt.c68
-rw-r--r--net/sched/act_mirred.c56
-rw-r--r--net/sched/act_nat.c33
-rw-r--r--net/sched/act_pedit.c45
-rw-r--r--net/sched/act_police.c22
-rw-r--r--net/sched/act_simple.c64
-rw-r--r--net/sched/act_skbedit.c36
-rw-r--r--net/sched/sch_netem.c79
-rw-r--r--net/sched/sch_pie.c21
-rw-r--r--net/sctp/associola.c82
-rw-r--r--net/sctp/sm_statefuns.c2
-rw-r--r--net/sctp/socket.c47
-rw-r--r--net/sctp/sysctl.c18
-rw-r--r--net/sctp/transport.c1
-rw-r--r--net/sctp/ulpevent.c8
-rw-r--r--net/socket.c13
-rw-r--r--net/tipc/addr.h2
-rw-r--r--net/tipc/bcast.c28
-rw-r--r--net/tipc/bcast.h4
-rw-r--r--net/tipc/bearer.c42
-rw-r--r--net/tipc/bearer.h7
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/core.h1
-rw-r--r--net/tipc/discover.c4
-rw-r--r--net/tipc/discover.h2
-rw-r--r--net/tipc/link.c615
-rw-r--r--net/tipc/link.h57
-rw-r--r--net/tipc/name_distr.c8
-rw-r--r--net/tipc/name_distr.h2
-rw-r--r--net/tipc/net.c10
-rw-r--r--net/tipc/node.c14
-rw-r--r--net/tipc/port.c97
-rw-r--r--net/tipc/port.h26
-rw-r--r--net/tipc/socket.c188
-rw-r--r--net/wireless/ap.c3
-rw-r--r--net/wireless/chan.c25
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/core.h7
-rw-r--r--net/wireless/ibss.c19
-rw-r--r--net/wireless/mesh.c6
-rw-r--r--net/wireless/mlme.c2
-rw-r--r--net/wireless/nl80211.c229
-rw-r--r--net/wireless/nl80211.h2
-rw-r--r--net/wireless/reg.c188
-rw-r--r--net/wireless/reg.h2
-rw-r--r--net/wireless/trace.h23
-rw-r--r--net/wireless/util.c5
-rw-r--r--scripts/mod/file2alias.c4
-rw-r--r--sound/pci/hda/hda_codec.c34
-rw-r--r--sound/pci/hda/hda_generic.c8
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_conexant.c3
-rw-r--r--sound/pci/hda/patch_realtek.c14
-rw-r--r--sound/pci/hda/patch_sigmatel.c27
-rw-r--r--sound/pci/hda/thinkpad_helper.c1
-rw-r--r--virt/kvm/arm/vgic.c1
-rw-r--r--virt/kvm/coalesced_mmio.c8
998 files changed, 25774 insertions, 15087 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-mdio b/Documentation/ABI/testing/sysfs-bus-mdio
index 6349749ebc29..491baaf4285f 100644
--- a/Documentation/ABI/testing/sysfs-bus-mdio
+++ b/Documentation/ABI/testing/sysfs-bus-mdio
@@ -7,3 +7,23 @@ Description:
by the device during bus enumeration, encoded in hexadecimal.
This ID is used to match the device with the appropriate
driver.
+
+What: /sys/bus/mdio_bus/devices/.../phy_interface
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ This attribute contains the PHY interface as configured by the
+ Ethernet driver during bus enumeration, encoded as a string.
+ This interface mode is used to configure the Ethernet MAC with the
+ appropriate mode for its data lines to the PHY hardware.
+
+What: /sys/bus/mdio_bus/devices/.../phy_has_fixups
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ This attribute indicates whether a given PHY device has had
+ any "fixup" workaround run on it, encoded as a boolean. This
+ information is provided to help with troubleshooting PHY
+ configurations.
diff --git a/Documentation/ABI/testing/sysfs-tty b/Documentation/ABI/testing/sysfs-tty
index ad22fb0ee765..a2ccec35ffce 100644
--- a/Documentation/ABI/testing/sysfs-tty
+++ b/Documentation/ABI/testing/sysfs-tty
@@ -3,7 +3,8 @@ Date: Nov 2010
Contact: Kay Sievers <kay.sievers@vrfy.org>
Description:
Shows the list of currently configured
- console devices, like 'tty1 ttyS0'.
+ tty devices used for the console,
+ like 'tty1 ttyS0'.
The last entry in the file is the active
device connected to /dev/console.
The file supports poll() to detect virtual
diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
index 0a85c70cd30a..07ad02075a93 100644
--- a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
+++ b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
@@ -13,6 +13,9 @@ Required properties:
- #address-cells: should be one. The cell is the slot id.
- #size-cells: should be zero.
- at least one slot node
+- clock-names: tuple listing input clock names.
+ Required elements: "mci_clk"
+- clocks: phandles to input clocks.
The node contains child nodes for each slot that the platform uses
@@ -24,6 +27,8 @@ mmc0: mmc@f0008000 {
interrupts = <12 4>;
#address-cells = <1>;
#size-cells = <0>;
+ clock-names = "mci_clk";
+ clocks = <&mci0_clk>;
[ child node definitions...]
};
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index 863d5b8155c7..10640b17c866 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -5,13 +5,9 @@ Required properties:
"allwinner,sun4i-emac")
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device
-- phy: A phandle to a phy node defining the PHY address (as the reg
- property, a single integer).
+- phy: see ethernet.txt file in the same directory.
- clocks: A phandle to the reference clock for this device
-Optional properties:
-- (local-)mac-address: mac address to be used by this driver
-
Example:
emac: ethernet@01c0b000 {
diff --git a/Documentation/devicetree/bindings/net/arc_emac.txt b/Documentation/devicetree/bindings/net/arc_emac.txt
index bcbc3f009158..7fbb027218a1 100644
--- a/Documentation/devicetree/bindings/net/arc_emac.txt
+++ b/Documentation/devicetree/bindings/net/arc_emac.txt
@@ -6,19 +6,12 @@ Required properties:
- interrupts: Should contain the EMAC interrupts
- clock-frequency: CPU frequency. It is needed to calculate and set polling
period of EMAC.
-- max-speed: Maximum supported data-rate in Mbit/s. In some HW configurations
-bandwidth of external memory controller might be a limiting factor. That's why
-it's required to specify which data-rate is supported on current SoC or FPGA.
-For example if only 10 Mbit/s is supported (10BASE-T) set "10". If 100 Mbit/s is
-supported (100BASE-TX) set "100".
-- phy: PHY device attached to the EMAC via MDIO bus
+- max-speed: see ethernet.txt file in the same directory.
+- phy: see ethernet.txt file in the same directory.
Child nodes of the driver are the individual PHY devices connected to the
MDIO bus. They must have a "reg" property giving the PHY address on the MDIO bus.
-Optional properties:
-- mac-address: 6 bytes, mac address
-
Examples:
ethernet@c0fc2000 {
diff --git a/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt b/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
new file mode 100644
index 000000000000..f2febb94550e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
@@ -0,0 +1,121 @@
+* Broadcom BCM7xxx Ethernet Controller (GENET)
+
+Required properties:
+- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
+ "brcm,genet-v3", "brcm,genet-v4".
+- reg: address and length of the register set for the device
+- interrupts: must be two cells, the first cell is the general purpose
+ interrupt line, while the second cell is the interrupt for the ring
+ RX and TX queues operating in ring mode
+- phy-mode: see ethernet.txt file in the same directory
+- #address-cells: should be 1
+- #size-cells: should be 1
+
+Optional properties:
+- clocks: When provided, must be two phandles to the functional clocks nodes
+ of the GENET block. The first phandle is the main GENET clock used during
+ normal operation, while the second phandle is the Wake-on-LAN clock.
+- clock-names: When provided, names of the functional clock phandles, first
+ name should be "enet" and second should be "enet-wol".
+
+- phy-handle: See ethernet.txt file in the same directory; used to describe
+ configurations where a PHY (internal or external) is used.
+
+- fixed-link: When the GENET interface is connected to a MoCA hardware block or
+ when operating in an RGMII to RGMII type of connection, or when the MDIO bus is
+ voluntarily disabled, this property should be used to describe the "fixed link".
+ See Documentation/devicetree/bindings/net/fsl-tsec-phy.txt for information on
+ the property specifics.
+
+Required child nodes:
+
+- mdio bus node: this node should always be present regardless of the PHY
+ configuration of the GENET instance
+
+MDIO bus node required properties:
+
+- compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
+ "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", the version has to match the
+ parent node compatible property (e.g: brcm,genet-v4 pairs with
+ brcm,genet-mdio-v4)
+- reg: address and length relative to the parent node base register address
+- #address-cells: address cell for MDIO bus addressing, should be 1
+- #size-cells: size of the cells for MDIO bus addressing, should be 0
+
+Ethernet PHY node properties:
+
+See Documentation/devicetree/bindings/net/phy.txt for the list of required and
+optional properties.
+
+Internal Gigabit PHY example:
+
+ethernet@f0b60000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xf0b60000 0xfc4c>;
+ interrupts = <0x0 0x14 0x0>, <0x0 0x15 0x0>;
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <1000>;
+ reg = <0x1>;
+ compatible = "brcm,28nm-gphy", "ethernet-phy-ieee802.3-c22";
+ };
+ };
+};
+
+MoCA interface / MAC to MAC example:
+
+ethernet@f0b80000 {
+ phy-mode = "moca";
+ fixed-link = <1 0 1000 0 0>;
+ mac-address = [ 00 10 18 36 24 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xf0b80000 0xfc4c>;
+ interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+ };
+};
+
+
+External MDIO-connected Gigabit PHY/switch:
+
+ethernet@f0ba0000 {
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ mac-address = [ 00 10 18 36 26 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xf0ba0000 0xfc4c>;
+ interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
+
+ mdio@0e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy0: ethernet-phy@0 {
+ max-speed = <1000>;
+ reg = <0x0>;
+ compatible = "brcm,bcm53125", "ethernet-phy-ieee802.3-c22";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt
index f2105a47ec87..b4a6d53fb01a 100644
--- a/Documentation/devicetree/bindings/net/can/sja1000.txt
+++ b/Documentation/devicetree/bindings/net/can/sja1000.txt
@@ -12,6 +12,10 @@ Required properties:
Optional properties:
+- reg-io-width : Specify the size (in bytes) of the IO accesses that
+ should be performed on the device. Valid value is 1, 2 or 4.
+ Default to 1 (8 bits).
+
- nxp,external-clock-frequency : Frequency of the external oscillator
clock in Hz. Note that the internal clock frequency used by the
SJA1000 is half of that value. If not specified, a default value
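
As a rough sketch of how the new reg-io-width property fits into a node, an
SJA1000 entry might look like the following; the bus address, register window
and interrupt values here are placeholders, not taken from this patch:

	can@3,100 {
		compatible = "nxp,sja1000";
		reg = <3 0x100 0x80>;		/* placeholder chip-select/offset */
		reg-io-width = <2>;		/* 16-bit register accesses */
		nxp,external-clock-frequency = <16000000>;
		interrupt-parent = <&mpic>;
		interrupts = <2>;
	};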
diff --git a/Documentation/devicetree/bindings/net/cavium-mix.txt b/Documentation/devicetree/bindings/net/cavium-mix.txt
index 5da628db68bf..8d7c3096390f 100644
--- a/Documentation/devicetree/bindings/net/cavium-mix.txt
+++ b/Documentation/devicetree/bindings/net/cavium-mix.txt
@@ -18,12 +18,7 @@ Properties:
- interrupts: Two interrupt specifiers. The first is the MIX
interrupt routing and the second the routing for the AGL interrupts.
-- mac-address: Optional, the MAC address to assign to the device.
-
-- local-mac-address: Optional, the MAC address to assign to the device
- if mac-address is not specified.
-
-- phy-handle: Optional, a phandle for the PHY device connected to this device.
+- phy-handle: Optional, see ethernet.txt file in the same directory.
Example:
ethernet@1070000100800 {
diff --git a/Documentation/devicetree/bindings/net/cavium-pip.txt b/Documentation/devicetree/bindings/net/cavium-pip.txt
index d4c53ba04b3b..7dbd158810d2 100644
--- a/Documentation/devicetree/bindings/net/cavium-pip.txt
+++ b/Documentation/devicetree/bindings/net/cavium-pip.txt
@@ -35,12 +35,7 @@ Properties for PIP port which is a child the PIP interface:
- reg: The port number within the interface group.
-- mac-address: Optional, the MAC address to assign to the device.
-
-- local-mac-address: Optional, the MAC address to assign to the device
- if mac-address is not specified.
-
-- phy-handle: Optional, a phandle for the PHY device connected to this device.
+- phy-handle: Optional, see ethernet.txt file in the same directory.
Example:
diff --git a/Documentation/devicetree/bindings/net/cdns-emac.txt b/Documentation/devicetree/bindings/net/cdns-emac.txt
index 09055c2495f0..abd67c13d344 100644
--- a/Documentation/devicetree/bindings/net/cdns-emac.txt
+++ b/Documentation/devicetree/bindings/net/cdns-emac.txt
@@ -6,11 +6,7 @@ Required properties:
or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii".
-
-Optional properties:
-- local-mac-address: 6 bytes, mac address
+- phy-mode: see ethernet.txt file in the same directory.
Examples:
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 05d660e4ac64..ae2b8b7f9c38 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -28,9 +28,8 @@ Optional properties:
Slave Properties:
Required properties:
- phy_id : Specifies slave phy id
-- phy-mode : The interface between the SoC and the PHY (a string
- that of_get_phy_mode() can understand)
-- mac-address : Specifies slave MAC address
+- phy-mode : See ethernet.txt file in the same directory
+- mac-address : See ethernet.txt file in the same directory
Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
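
A minimal slave node combining the properties above could look roughly like
this sketch (the &davinci_mdio phandle and the zeroed MAC address are
illustrative placeholders, not part of this patch):

	cpsw_emac0: slave@0 {
		phy_id = <&davinci_mdio>, <0>;
		phy-mode = "rgmii-txid";
		/* typically filled in by the bootloader */
		mac-address = [ 00 00 00 00 00 00 ];
	};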
diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
index 2d39c990e641..28767ed7c1bd 100644
--- a/Documentation/devicetree/bindings/net/davicom-dm9000.txt
+++ b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
@@ -9,8 +9,6 @@ Required properties:
- interrupts : interrupt specifier specific to interrupt controller
Optional properties:
-- local-mac-address : A bytestring of 6 bytes specifying Ethernet MAC address
- to use (from firmware or bootloader)
- davicom,no-eeprom : Configuration EEPROM is not available
- davicom,ext-phy : Use external PHY
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 6e356d15154a..032808843f90 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -17,9 +17,8 @@ Required properties:
Miscellaneous Interrupt>
Optional properties:
-- phy-handle: Contains a phandle to an Ethernet PHY.
+- phy-handle: See ethernet.txt file in the same directory.
If absent, davinci_emac driver defaults to 100/FULL.
-- local-mac-address : 6 bytes, mac address
- ti,davinci-rmii-en: 1 byte, 1 means use RMII
- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
new file mode 100644
index 000000000000..9ecd43d8792c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -0,0 +1,25 @@
+The following properties are common to the Ethernet controllers:
+
+- local-mac-address: array of 6 bytes, specifies the MAC address that was
+ assigned to the network device;
+- mac-address: array of 6 bytes, specifies the MAC address that was last used by
+ the boot program; should be used in cases where the MAC address assigned to
+ the device by the boot program is different from the "local-mac-address"
+ property;
+- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
+- max-frame-size: number, maximum transmission unit (IEEE-defined MTU), rather
+ than the maximum frame size (there is a contradiction in ePAPR).
+- phy-mode: string, operation mode of the PHY interface; supported values are
+ "mii", "gmii", "sgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
+ "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii"; this is now a de-facto
+ standard property;
+- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
+- phy-handle: phandle, specifies a reference to a node representing a PHY
+ device; this property is described in ePAPR and so preferred;
+- phy: the same as "phy-handle" property, not recommended for new bindings.
+- phy-device: the same as "phy-handle" property, not recommended for new
+ bindings.
+
+Child nodes of the Ethernet controller are typically the individual PHY devices
+connected via the MDIO bus (sometimes the MDIO bus controller is separate).
+They are described in the phy.txt file in this same directory.
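+
+Example (an illustrative sketch only; the compatible string, unit address,
+MAC address and PHY node below are placeholders, not taken from any real
+binding or board):
+
+	ethernet@f0000000 {
+		compatible = "vendor,soc-ethernet";
+		reg = <0xf0000000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		local-mac-address = [00 11 22 33 44 55];
+		phy-mode = "rgmii";
+		phy-handle = <&phy0>;
+		max-speed = <1000>;
+
+		phy0: ethernet-phy@1 {
+			reg = <1>;
+		};
+	};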
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 845ff848d895..6bc84adb10c0 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -4,12 +4,9 @@ Required properties:
- compatible : Should be "fsl,<soc>-fec"
- reg : Address and length of the register set for the device
- interrupts : Should contain fec interrupt
-- phy-mode : String, operation mode of the PHY interface.
- Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
- "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
+- phy-mode : See ethernet.txt file in the same directory
Optional properties:
-- local-mac-address : 6 bytes, mac address
- phy-reset-gpios : Should specify the gpio for phy reset
- phy-reset-duration : Reset duration in milliseconds. Should present
only if property "phy-reset-gpios" is available. Missing the property
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index d2ea4605d078..737cdef4f903 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -38,22 +38,17 @@ Properties:
- model : Model of the device. Can be "TSEC", "eTSEC", or "FEC"
- compatible : Should be "gianfar"
- reg : Offset and length of the register set for the device
- - local-mac-address : List of bytes representing the ethernet address of
- this controller
- interrupts : For FEC devices, the first interrupt is the device's
interrupt. For TSEC and eTSEC devices, the first interrupt is
transmit, the second is receive, and the third is error.
- - phy-handle : The phandle for the PHY connected to this ethernet
- controller.
+ - phy-handle : See ethernet.txt file in the same directory.
- fixed-link : <a b c d e> where a is emulated phy id - choose any,
but unique to the all specified fixed-links, b is duplex - 0 half,
1 full, c is link speed - d#10/d#100/d#1000, d is pause - 0 no
pause, 1 pause, e is asym_pause - 0 no asym_pause, 1 asym_pause.
- - phy-connection-type : a string naming the controller/PHY interface type,
- i.e., "mii" (default), "rmii", "gmii", "rgmii", "rgmii-id", "sgmii",
- "tbi", or "rtbi". This property is only really needed if the connection
- is of type "rgmii-id", as all other connection types are detected by
- hardware.
+ - phy-connection-type : See ethernet.txt file in the same directory.
+ This property is only really needed if the connection is of type
+ "rgmii-id", as all other connection types are detected by hardware.
- fsl,magic-packet : If present, indicates that the hardware supports
waking up via magic packet.
- bd-stash : If present, indicates that the hardware supports stashing
diff --git a/Documentation/devicetree/bindings/net/lpc-eth.txt b/Documentation/devicetree/bindings/net/lpc-eth.txt
index 585021acd178..b92e927808b6 100644
--- a/Documentation/devicetree/bindings/net/lpc-eth.txt
+++ b/Documentation/devicetree/bindings/net/lpc-eth.txt
@@ -6,10 +6,9 @@ Required properties:
- interrupts: Should contain ethernet controller interrupt
Optional properties:
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii" (default)
+- phy-mode: See ethernet.txt file in the same directory. If the property is
+ absent, "rmii" is assumed.
- use-iram: Use LPC32xx internal SRAM (IRAM) for DMA buffering
-- local-mac-address : 6 bytes, mac address
Example:
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 70af2ec12b09..aaa696414f57 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -8,16 +8,12 @@ Required properties:
the Cadence GEM, or the generic form: "cdns,gem".
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii", "gmii", "rgmii".
+- phy-mode: See ethernet.txt file in the same directory.
- clock-names: Tuple listing input clock names.
Required elements: 'pclk', 'hclk'
Optional elements: 'tx_clk'
- clocks: Phandles to input clocks.
-Optional properties:
-- local-mac-address: 6 bytes, mac address
-
Examples:
macb0: ethernet@fffc4000 {
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index 859a6fa7569c..750d577e8083 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -4,10 +4,8 @@ Required properties:
- compatible: should be "marvell,armada-370-neta".
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device
-- phy: A phandle to a phy node defining the PHY address (as the reg
- property, a single integer).
-- phy-mode: The interface between the SoC and the PHY (a string that
- of_get_phy_mode() can understand)
+- phy: See ethernet.txt file in the same directory.
+- phy-mode: See ethernet.txt file in the same directory
- clocks: a pointer to the reference clock for this device.
Example:
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index c233b6114242..bce52b2ec55e 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -36,7 +36,7 @@ Required port properties:
"marvell,kirkwood-eth-port".
- reg: port number relative to ethernet controller, shall be 0, 1, or 2.
- interrupts: port interrupt.
- - local-mac-address: 6 bytes MAC address.
+ - local-mac-address: See ethernet.txt file in the same directory.
Optional port properties:
- marvell,tx-queue-size: size of the transmit ring buffer.
@@ -48,7 +48,7 @@ Optional port properties:
and
- - phy-handle: phandle reference to ethernet PHY.
+ - phy-handle: See ethernet.txt file in the same directory.
or
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c3d805..8e20c04a1da1 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -4,6 +4,3 @@ Required properties:
- compatible = "micrel,ks8851-ml" of parallel interface
- reg : 2 physical address and size of registers for data and command
- interrupts : interrupt connection
-
-Optional properties:
-- local-mac-address : Ethernet mac address to use
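+
+Example (an illustrative sketch only; the register addresses, interrupt
+controller and interrupt specifier are placeholders, not taken from a real
+board):
+
+	ethernet@8000000 {
+		compatible = "micrel,ks8851-ml";
+		reg = <0x08000000 0x2>, <0x08000002 0x2>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+	};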
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
new file mode 100644
index 000000000000..e7106b50dbdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -0,0 +1,55 @@
+* Renesas Electronics SH EtherMAC
+
+This file provides information on what the device node for the SH EtherMAC
+interface contains.
+
+Required properties:
+- compatible: "renesas,gether-r8a7740" if the device is a part of R8A7740 SoC.
+ "renesas,ether-r8a7778" if the device is a part of R8A7778 SoC.
+ "renesas,ether-r8a7779" if the device is a part of R8A7779 SoC.
+ "renesas,ether-r8a7790" if the device is a part of R8A7790 SoC.
+ "renesas,ether-r8a7791" if the device is a part of R8A7791 SoC.
+ "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+- reg: offset and length of (1) the E-DMAC/feLic register block (required),
+ (2) the TSU register block (optional).
+- interrupts: interrupt specifier for the sole interrupt.
+- phy-mode: see ethernet.txt file in the same directory.
+- phy-handle: see ethernet.txt file in the same directory.
+- #address-cells: number of address cells for the MDIO bus, must be equal to 1.
+- #size-cells: number of size cells on the MDIO bus, must be equal to 0.
+- clocks: clock phandle and specifier pair.
+- pinctrl-0: phandle, referring to a default pin configuration node.
+
+Optional properties:
+- interrupt-parent: the phandle for the interrupt controller that services
+ interrupts for this device.
+- pinctrl-names: pin configuration state name ("default").
+- renesas,no-ether-link: boolean, specify when a board does not provide a proper
+ Ether LINK signal.
+- renesas,ether-link-active-low: boolean, specify when the Ether LINK signal is
+ active-low instead of normal active-high.
+
+Example (Lager board):
+
+ ethernet@ee700000 {
+ compatible = "renesas,ether-r8a7790";
+ reg = <0 0xee700000 0 0x400>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
+ phy-mode = "rmii";
+ phy-handle = <&phy1>;
+ pinctrl-0 = <&ether_pins>;
+ pinctrl-names = "default";
+ renesas,ether-link-active-low;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&phy1_pins>;
+ pinctrl-names = "default";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index 5a41a8658daa..0f8487b88822 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -6,8 +6,7 @@ Required properties:
- interrupts : interrupt connection
Optional properties:
-- phy-device : phandle to Ethernet phy
-- local-mac-address : Ethernet mac address to use
+- phy-device : see ethernet.txt file in the same directory
- reg-io-width : Mask of sizes (in bytes) of the IO accesses that
are supported on the device. Valid value for SMSC LAN91c111 are
1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index adb5b5744ecd..3fed3c124411 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -6,9 +6,7 @@ Required properties:
- interrupts : Should contain SMSC LAN interrupt line
- interrupt-parent : Should be the phandle for the interrupt controller
that services interrupts for this device
-- phy-mode : String, operation mode of the PHY interface.
- Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
- "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
+- phy-mode : See ethernet.txt file in the same directory
Optional properties:
- reg-shift : Specify the quantity to shift the register offsets by
@@ -23,7 +21,6 @@ Optional properties:
external PHY
- smsc,save-mac-address : Indicates that mac address needs to be saved
before resetting the controller
-- local-mac-address : 6 bytes, mac address
Examples:
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
new file mode 100644
index 000000000000..3dd3d0bf112f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -0,0 +1,58 @@
+STMicroelectronics SoC DWMAC glue layer controller
+
+The device node has the following properties.
+
+Required properties:
+ - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or
+ "st,stid127-dwmac".
+ - reg : Offset and size of the glue configuration registers within the
+ system configuration regmap pointed to by the st,syscon property.
+
+ - reg-names : Should be "sti-ethconf".
+
+ - st,syscon : Should be a phandle to the system configuration node which
+ encompasses these glue registers.
+
+ - st,tx-retime-src: On STi parts, for gigabit speeds the 125 MHz clock can be
+ wired in from different sources: one via the TXCLK pin and the other via the
+ CLK_125 pin. This wiring is entirely board dependent, but the retiming glue
+ logic must be configured accordingly. Possible values for this property are:
+
+ "txclk" - if the 125 MHz clock is wired up via the txclk line.
+ "clk_125" - if the 125 MHz clock is wired up via the clk_125 line.
+
+ This property is only valid for gigabit setups (GMII, RGMII), and it is
+ unused for non-gigabit (MII and RMII) setups. Also note that the internal
+ clockgen cannot generate a stable 125 MHz clock.
+
+ - st,ext-phyclk: This boolean property indicates which side generates the
+ TX and RX clock. It is only valid for the RMII case, where the clock can be
+ generated by either the MAC or the PHY.
+
+ - clock-names: Should be "sti-ethclk".
+ - clocks: Should point to the ethernet clockgen that can generate the PHY clock.
+
+
+Example:
+
+ethernet0: dwmac@fe810000 {
+ device_type = "network";
+ compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
+ reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
+ reg-names = "stmmaceth", "sti-ethconf";
+ interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
+ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
+ phy-mode = "mii";
+
+ st,syscon = <&syscfg_rear>;
+
+ snps,pbl = <32>;
+ snps,mixed-burst;
+
+ resets = <&softreset STIH416_ETH0_SOFTRESET>;
+ reset-names = "stmmaceth";
+ pinctrl-0 = <&pinctrl_mii0>;
+ pinctrl-names = "default";
+ clocks = <&CLK_S_GMAC0_PHY>;
+ clock-names = "stmmaceth";
+};
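+
+For a gigabit (RGMII) setup the retiming clock source must also be selected.
+The fragment below is only an illustrative sketch (the chosen phy-mode and
+retime source are assumptions, not taken from a real board):
+
+	phy-mode = "rgmii";
+	st,tx-retime-src = "clk_125";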
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 9d92d42140f2..5748351fb9df 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -10,8 +10,7 @@ Required properties:
- interrupt-names: Should contain the interrupt names "macirq"
"eth_wake_irq" if this interrupt is supported in the "interrupts"
property
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii", "gmii", "rgmii".
+- phy-mode: See ethernet.txt file in the same directory.
- snps,reset-gpio gpio number for phy reset.
- snps,reset-active-low boolean flag to indicate if phy reset is active low.
- snps,reset-delays-us is triplet of delays
@@ -28,12 +27,10 @@ Required properties:
ignored if force_thresh_dma_mode is set.
Optional properties:
-- mac-address: 6 bytes, mac address
- resets: Should contain a phandle to the STMMAC reset signal, if any
- reset-names: Should contain the reset signal name "stmmaceth", if a
reset phandle is given
-- max-frame-size: Maximum Transfer Unit (IEEE defined MTU), rather
- than the maximum frame size.
+- max-frame-size: See ethernet.txt file in the same directory
Examples:
diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/bq2415x.txt
new file mode 100644
index 000000000000..d0327f0b59ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/bq2415x.txt
@@ -0,0 +1,47 @@
+Binding for TI bq2415x Li-Ion Charger
+
+Required properties:
+- compatible: Should contain one of the following:
+ * "ti,bq24150"
+ * "ti,bq24150"
+ * "ti,bq24150a"
+ * "ti,bq24151"
+ * "ti,bq24151a"
+ * "ti,bq24152"
+ * "ti,bq24153"
+ * "ti,bq24153a"
+ * "ti,bq24155"
+ * "ti,bq24156"
+ * "ti,bq24156a"
+ * "ti,bq24158"
+- reg: integer, i2c address of the device.
+- ti,current-limit: integer, initial maximum current charger can pull
+ from power supply in mA.
+- ti,weak-battery-voltage: integer, weak battery voltage threshold in mV.
+ The chip will use slow precharge if battery voltage
+ is below this value.
+- ti,battery-regulation-voltage: integer, maximum charging voltage in mV.
+- ti,charge-current: integer, maximum charging current in mA.
+- ti,termination-current: integer, charge will be terminated when current in
+ constant-voltage phase drops below this value (in mA).
+- ti,resistor-sense: integer, value of sensing resistor in milliohm.
+
+Optional properties:
+- ti,usb-charger-detection: phandle to usb charger detection device.
+ (required for auto mode)
+
+Example from Nokia N900:
+
+bq24150a {
+ compatible = "ti,bq24150a";
+ reg = <0x6b>;
+
+ ti,current-limit = <100>;
+ ti,weak-battery-voltage = <3400>;
+ ti,battery-regulation-voltage = <4200>;
+ ti,charge-current = <650>;
+ ti,termination-current = <100>;
+ ti,resistor-sense = <68>;
+
+ ti,usb-charger-detection = <&isp1704>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt
index 07e04cdc0c9e..4f8184d069cb 100644
--- a/Documentation/devicetree/bindings/spi/spi_atmel.txt
+++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt
@@ -5,6 +5,9 @@ Required properties:
- reg: Address and length of the register set for the device
- interrupts: Should contain spi interrupt
- cs-gpios: chipselects
+- clock-names: tuple listing input clock names.
+ Required elements: "spi_clk"
+- clocks: phandles to input clocks.
Example:
@@ -14,6 +17,8 @@ spi1: spi@fffcc000 {
interrupts = <13 4 5>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&spi1_clk>;
+ clock-names = "spi_clk";
cs-gpios = <&pioB 3 0>;
status = "okay";
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 3f900cd51bf0..40ce2df0e0e9 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@ ad Avionic Design GmbH
adi Analog Devices, Inc.
aeroflexgaisler Aeroflex Gaisler AB
ak Asahi Kasei Corp.
+allwinner Allwinner Technology Co., Ltd.
altr Altera Corp.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
amstaos AMS-Taos Inc.
@@ -40,6 +41,7 @@ gmt Global Mixed-mode Technology, Inc.
gumstix Gumstix, Inc.
haoyu Haoyu Microelectronic Co. Ltd.
hisilicon Hisilicon Limited.
+honeywell Honeywell
hp Hewlett Packard
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
@@ -55,6 +57,7 @@ maxim Maxim Integrated Products
microchip Microchip Technology Inc.
mosaixtech Mosaix Technologies, Inc.
national National Semiconductor
+neonode Neonode Inc.
nintendo Nintendo
nvidia NVIDIA
nxp NXP Semiconductors
@@ -64,7 +67,7 @@ phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
powervr PowerVR (deprecated, use img)
qca Qualcomm Atheros, Inc.
-qcom Qualcomm, Inc.
+qcom Qualcomm Technologies, Inc
ralink Mediatek/Ralink Technology Corp.
ramtron Ramtron International
realtek Realtek Semiconductor Corp.
@@ -78,6 +81,7 @@ silabs Silicon Laboratories
simtek
sirf SiRF Technology, Inc.
snps Synopsys, Inc.
+spansion Spansion Inc.
st STMicroelectronics
ste ST-Ericsson
stericsson ST-Ericsson
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index c70e7a7638d1..0d85ac1935b7 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -8,8 +8,8 @@ reason, the kernel code must instantiate I2C devices explicitly. There are
several ways to achieve this, depending on the context and requirements.
-Method 1: Declare the I2C devices by bus number
------------------------------------------------
+Method 1a: Declare the I2C devices by bus number
+------------------------------------------------
This method is appropriate when the I2C bus is a system bus as is the case
for many embedded systems. On such systems, each I2C bus has a number
@@ -51,6 +51,43 @@ The devices will be automatically unbound and destroyed when the I2C bus
they sit on goes away (if ever.)
+Method 1b: Declare the I2C devices via devicetree
+-------------------------------------------------
+
+This method has the same implications as method 1a. The declaration of I2C
+devices is done here via device tree, as subnodes of the master controller.
+
+Example:
+
+ i2c1: i2c@400a0000 {
+ /* ... master properties skipped ... */
+ clock-frequency = <100000>;
+
+ flash@50 {
+ compatible = "atmel,24c256";
+ reg = <0x50>;
+ };
+
+ pca9532: gpio@60 {
+ compatible = "nxp,pca9532";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x60>;
+ };
+ };
+
+Here, two devices are attached to the bus using a speed of 100kHz. For
+additional properties which might be needed to set up the device, please refer
+to its devicetree documentation in Documentation/devicetree/bindings/.
+
+
+Method 1c: Declare the I2C devices via ACPI
+-------------------------------------------
+
+ACPI can also describe I2C devices. There is special documentation for this
+which is currently located at Documentation/acpi/enumeration.txt.
+
+
Method 2: Instantiate the devices explicitly
--------------------------------------------
diff --git a/Documentation/networking/3c505.txt b/Documentation/networking/3c505.txt
deleted file mode 100644
index 72f38b13101d..000000000000
--- a/Documentation/networking/3c505.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-The 3Com Etherlink Plus (3c505) driver.
-
-This driver now uses DMA. There is currently no support for PIO operation.
-The default DMA channel is 6; this is _not_ autoprobed, so you must
-make sure you configure it correctly. If loading the driver as a
-module, you can do this with "modprobe 3c505 dma=n". If the driver is
-linked statically into the kernel, you must either use an "ether="
-statement on the command line, or change the definition of ELP_DMA in 3c505.h.
-
-The driver will warn you if it has to fall back on the compiled in
-default DMA channel.
-
-If no base address is given at boot time, the driver will autoprobe
-ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
-will try to probe for it.
-
-The driver can be used as a loadable module.
-
-Theoretically, one instance of the driver can now run multiple cards,
-in the standard way (when loading a module, say "modprobe 3c505
-io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested
-this, though.
-
-The driver may now support revision 2 hardware; the dependency on
-being able to read the host control register has been removed. This
-is also untested, since I don't have a suitable card.
-
-Known problems:
- I still see "DMA upload timed out" messages from time to time. These
-seem to be fairly non-fatal though.
- The card is old and slow.
-
-To do:
- Improve probe/setup code
- Test multicast and promiscuous operation
-
-Authors:
- The driver is mainly written by Craig Southeren, email
- <craigs@ineluki.apana.org.au>.
- Parts of the driver (adapting the driver to 1.1.4+ kernels,
- IRQ/address detection, some changes) and this README by
- Juha Laiho <jlaiho@ichaos.nullnet.fi>.
- DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
- Multicard support, Software configurable DMA, etc., by
- Christopher Collins <ccollins@pcug.org.au>
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 5cdb22971d19..a383c00392d0 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -270,16 +270,15 @@ arp_ip_target
arp_validate
Specifies whether or not ARP probes and replies should be
- validated in the active-backup mode. This causes the ARP
- monitor to examine the incoming ARP requests and replies, and
- only consider a slave to be up if it is receiving the
- appropriate ARP traffic.
+ validated in any mode that supports arp monitoring, or whether
+ non-ARP traffic should be filtered (disregarded) for link
+ monitoring purposes.
Possible values are:
none or 0
- No validation is performed. This is the default.
+ No validation or filtering is performed.
active or 1
@@ -293,31 +292,68 @@ arp_validate
Validation is performed for all slaves.
- For the active slave, the validation checks ARP replies to
- confirm that they were generated by an arp_ip_target. Since
- backup slaves do not typically receive these replies, the
- validation performed for backup slaves is on the ARP request
- sent out via the active slave. It is possible that some
- switch or network configurations may result in situations
- wherein the backup slaves do not receive the ARP requests; in
- such a situation, validation of backup slaves must be
- disabled.
-
- The validation of ARP requests on backup slaves is mainly
- helping bonding to decide which slaves are more likely to
- work in case of the active slave failure, it doesn't really
- guarantee that the backup slave will work if it's selected
- as the next active slave.
-
- This option is useful in network configurations in which
- multiple bonding hosts are concurrently issuing ARPs to one or
- more targets beyond a common switch. Should the link between
- the switch and target fail (but not the switch itself), the
- probe traffic generated by the multiple bonding instances will
- fool the standard ARP monitor into considering the links as
- still up. Use of the arp_validate option can resolve this, as
- the ARP monitor will only consider ARP requests and replies
- associated with its own instance of bonding.
+ filter or 4
+
+ Filtering is applied to all slaves. No validation is
+ performed.
+
+ filter_active or 5
+
+ Filtering is applied to all slaves, validation is performed
+ only for the active slave.
+
+ filter_backup or 6
+
+ Filtering is applied to all slaves, validation is performed
+ only for backup slaves.
+
+ Validation:
+
+ Enabling validation causes the ARP monitor to examine the incoming
+ ARP requests and replies, and only consider a slave to be up if it
+ is receiving the appropriate ARP traffic.
+
+ For an active slave, the validation checks ARP replies to confirm
+ that they were generated by an arp_ip_target. Since backup slaves
+ do not typically receive these replies, the validation performed
+ for backup slaves is on the broadcast ARP request sent out via the
+ active slave. It is possible that some switch or network
+ configurations may result in situations wherein the backup slaves
+ do not receive the ARP requests; in such a situation, validation
+ of backup slaves must be disabled.
+
+ The validation of ARP requests on backup slaves is mainly helping
+ bonding to decide which slaves are more likely to work in case of
+ the active slave failure, it doesn't really guarantee that the
+ backup slave will work if it's selected as the next active slave.
+
+ Validation is useful in network configurations in which multiple
+ bonding hosts are concurrently issuing ARPs to one or more targets
+ beyond a common switch. Should the link between the switch and
+ target fail (but not the switch itself), the probe traffic
+ generated by the multiple bonding instances will fool the standard
+ ARP monitor into considering the links as still up. Use of
+ validation can resolve this, as the ARP monitor will only consider
+ ARP requests and replies associated with its own instance of
+ bonding.
+
+ Filtering:
+
+ Enabling filtering causes the ARP monitor to only use incoming ARP
+ packets for link availability purposes. Arriving packets that are
+ not ARPs are delivered normally, but do not count when determining
+ if a slave is available.
+
+ Filtering operates by only considering the reception of ARP
+ packets (any ARP packet, regardless of source or destination) when
+ determining if a slave has received traffic for link availability
+ purposes.
+
+ Filtering is useful in network configurations in which significant
+ levels of third party broadcast traffic would fool the standard
+ ARP monitor into considering the links as still up. Use of
+ filtering can resolve this, as only ARP traffic is considered for
+ link availability purposes.
This option was added in bonding version 3.1.0.
diff --git a/Documentation/networking/gianfar.txt b/Documentation/networking/gianfar.txt
index ad474ea07d07..ba1daea7f2e4 100644
--- a/Documentation/networking/gianfar.txt
+++ b/Documentation/networking/gianfar.txt
@@ -1,38 +1,8 @@
The Gianfar Ethernet Driver
-Sysfs File description
Author: Andy Fleming <afleming@freescale.com>
Updated: 2005-07-28
-SYSFS
-
-Several of the features of the gianfar driver are controlled
-through sysfs files. These are:
-
-bd_stash:
-To stash RX Buffer Descriptors in the L2, echo 'on' or '1' to
-bd_stash, echo 'off' or '0' to disable
-
-rx_stash_len:
-To stash the first n bytes of the packet in L2, echo the number
-of bytes to buf_stash_len. echo 0 to disable.
-
-WARNING: You could really screw these up if you set them too low or high!
-fifo_threshold:
-To change the number of bytes the controller needs in the
-fifo before it starts transmission, echo the number of bytes to
-fifo_thresh. Range should be 0-511.
-
-fifo_starve:
-When the FIFO has less than this many bytes during a transmit, it
-enters starve mode, and increases the priority of TX memory
-transactions. To change, echo the number of bytes to
-fifo_starve. Range should be 0-511.
-
-fifo_starve_off:
-Once in starve mode, the FIFO remains there until it has this
-many bytes. To change, echo the number of bytes to
-fifo_starve_off. Range should be 0-511.
CHECKSUM OFFLOADING
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index ebf270719402..e602c6f347df 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -253,16 +253,25 @@ Writing a PHY driver
Each driver consists of a number of function pointers:
+ soft_reset: perform a PHY software reset
config_init: configures PHY into a sane state after a reset.
For instance, a Davicom PHY requires descrambling disabled.
probe: Allocate phy->priv, optionally refuse to bind.
PHY may not have been reset or had fixups run yet.
suspend/resume: power management
config_aneg: Changes the speed/duplex/negotiation settings
+ aneg_done: Determines the auto-negotiation result
read_status: Reads the current speed/duplex/negotiation settings
ack_interrupt: Clear a pending interrupt
+ did_interrupt: Checks if the PHY generated an interrupt
config_intr: Enable or disable interrupts
remove: Does any driver take-down
+ ts_info: Queries about the HW timestamping status
+ hwtstamp: Set the PHY HW timestamping configuration
+ rxtstamp: Requests a receive timestamp at the PHY level for a 'skb'
+ txtstamp: Requests a transmit timestamp at the PHY level for a 'skb'
+ set_wol: Enable Wake-on-LAN at the PHY level
+ get_wol: Get the Wake-on-LAN status at the PHY level
Of these, only config_aneg and read_status are required to be
assigned by the driver code. The rest are optional. Also, it is
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index 7d11bb5dc30a..bdc4c0db51e1 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -30,7 +30,7 @@ A congestion control mechanism can be registered through functions in
tcp_cong.c. The functions used by the congestion control mechanism are
registered via passing a tcp_congestion_ops struct to
tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid, min_cwnd must be valid.
+cong_avoid must be valid.
Private data for a congestion control mechanism is stored in tp->ca_priv.
tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index 0103e4b15b0e..ebff6ee52441 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -75,14 +75,26 @@ Before the controller can make use of the PHY, it has to get a reference to
it. This framework provides the following APIs to get a reference to the PHY.
struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
-
-phy_get and devm_phy_get can be used to get the PHY. In the case of dt boot,
-the string arguments should contain the phy name as given in the dt data and
-in the case of non-dt boot, it should contain the label of the PHY.
-The only difference between the two APIs is that devm_phy_get associates the
-device with the PHY using devres on successful PHY get. On driver detach,
-release function is invoked on the the devres data and devres data is freed.
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
+
+phy_get, phy_optional_get, devm_phy_get and devm_phy_optional_get can
+be used to get the PHY. In the case of dt boot, the string arguments
+should contain the phy name as given in the dt data and in the case of
+non-dt boot, it should contain the label of the PHY. The two devm_*
+variants associate the device with the PHY using devres on a
+successful PHY get; on driver detach, the release function is invoked
+on the devres data and the devres data is freed. phy_optional_get and
+devm_phy_optional_get should be used when the phy is optional. These
+two functions will never return -ENODEV; instead they return NULL when
+the phy cannot be found.
+
+It should be noted that NULL is a valid phy reference. All phy
+consumer calls on the NULL phy become NOPs. That is, the release calls,
+the phy_init() and phy_exit() calls, and the phy_power_on() and
+phy_power_off() calls are all NOPs when applied to a NULL phy. The NULL
+phy is useful for handling optional phy devices.
5. Releasing a reference to the PHY
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index f72e0d1e0da8..7982bcc4d151 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -543,7 +543,22 @@ SPI MASTER METHODS
queuing transfers that arrive in the meantime. When the driver is
finished with this message, it must call
spi_finalize_current_message() so the subsystem can issue the next
- transfer. This may sleep.
+ message. This may sleep.
+
+ master->transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer)
+ The subsystem calls the driver to transfer a single transfer while
+ queuing transfers that arrive in the meantime. When the driver is
+ finished with this transfer, it must call
+ spi_finalize_current_transfer() so the subsystem can issue the next
+ transfer. This may sleep. Note: transfer_one and transfer_one_message
+ are mutually exclusive; when both are set, the generic subsystem does
+ not call your transfer_one callback.
+
+ Return values:
+ negative errno: error
+ 0: transfer is finished
+ 1: transfer is still in progress
DEPRECATED METHODS
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt
index 28fa325b7461..6f6d956ac1c9 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/zh_CN/arm64/booting.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
or if there is a problem with the translation.
Maintainer: Will Deacon <will.deacon@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
---------------------------------------------------------------------
Documentation/arm64/booting.txt 的中文翻译
@@ -16,9 +16,9 @@ Documentation/arm64/booting.txt 的中文翻译
译存在问题,请联系中文版维护者。
英文版维护者: Will Deacon <will.deacon@arm.com>
-中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
以下为正文
---------------------------------------------------------------------
@@ -64,8 +64,8 @@ RAM,或可能使用对这个设备已知的 RAM 信息,还可能使用任何
必要性: 强制
-设备树数据块(dtb)大小必须不大于 2 MB,且位于从内核映像起始算起第一个
-512MB 内的 2MB 边界上。这使得内核可以通过初始页表中的单个节描述符来
+设备树数据块(dtb)必须 8 字节对齐,并位于从内核映像起始算起第一个 512MB
+内,且不得跨越 2MB 对齐边界。这使得内核可以通过初始页表中的单个节描述符来
映射此数据块。
@@ -84,13 +84,23 @@ AArch64 内核当前没有提供自解压代码,因此如果使用了压缩内
必要性: 强制
-已解压的内核映像包含一个 32 字节的头,内容如下:
+已解压的内核映像包含一个 64 字节的头,内容如下:
- u32 magic = 0x14000008; /* 跳转到 stext, 小端 */
- u32 res0 = 0; /* 保留 */
+ u32 code0; /* 可执行代码 */
+ u32 code1; /* 可执行代码 */
u64 text_offset; /* 映像装载偏移 */
+ u64 res0 = 0; /* 保留 */
u64 res1 = 0; /* 保留 */
u64 res2 = 0; /* 保留 */
+ u64 res3 = 0; /* 保留 */
+ u64 res4 = 0; /* 保留 */
+ u32 magic = 0x644d5241; /* 魔数, 小端, "ARM\x64" */
+ u32 res5 = 0; /* 保留 */
+
+
+映像头注释:
+
+- code0/code1 负责跳转到 stext.
映像必须位于系统 RAM 起始处的特定偏移(当前是 0x80000)。系统 RAM
的起始地址必须是以 2MB 对齐的。
@@ -118,9 +128,9 @@ AArch64 内核当前没有提供自解压代码,因此如果使用了压缩内
外部高速缓存(如果存在)必须配置并禁用。
- 架构计时器
- CNTFRQ 必须设定为计时器的频率。
- 如果在 EL1 模式下进入内核,则 CNTHCTL_EL2 中的 EL1PCTEN (bit 0)
- 必须置位。
+ CNTFRQ 必须设定为计时器的频率,且 CNTVOFF 必须设定为对所有 CPU
+ 都一致的值。如果在 EL1 模式下进入内核,则 CNTHCTL_EL2 中的
+ EL1PCTEN (bit 0) 必须置位。
- 一致性
通过内核启动的所有 CPU 在内核入口地址上必须处于相同的一致性域中。
@@ -131,23 +141,40 @@ AArch64 内核当前没有提供自解压代码,因此如果使用了压缩内
在进入内核映像的异常级中,所有构架中可写的系统寄存器必须通过软件
在一个更高的异常级别下初始化,以防止在 未知 状态下运行。
+以上对于 CPU 模式、高速缓存、MMU、架构计时器、一致性、系统寄存器的
+必要条件描述适用于所有 CPU。所有 CPU 必须在同一异常级别跳入内核。
+
引导装载程序必须在每个 CPU 处于以下状态时跳入内核入口:
- 主 CPU 必须直接跳入内核映像的第一条指令。通过此 CPU 传递的设备树
- 数据块必须在每个 CPU 节点中包含以下内容:
-
- 1、‘enable-method’属性。目前,此字段支持的值仅为字符串“spin-table”。
-
- 2、‘cpu-release-addr’标识一个 64-bit、初始化为零的内存位置。
+ 数据块必须在每个 CPU 节点中包含一个 ‘enable-method’ 属性,所
+ 支持的 enable-method 请见下文。
引导装载程序必须生成这些设备树属性,并在跳入内核入口之前将其插入
数据块。
-- 任何辅助 CPU 必须在内存保留区(通过设备树中的 /memreserve/ 域传递
+- enable-method 为 “spin-table” 的 CPU 必须在它们的 CPU
+ 节点中包含一个 ‘cpu-release-addr’ 属性。这个属性标识了一个
+ 64 位自然对齐且初始化为零的内存位置。
+
+ 这些 CPU 必须在内存保留区(通过设备树中的 /memreserve/ 域传递
给内核)中自旋于内核之外,轮询它们的 cpu-release-addr 位置(必须
包含在保留区中)。可通过插入 wfe 指令来降低忙循环开销,而主 CPU 将
发出 sev 指令。当对 cpu-release-addr 所指位置的读取操作返回非零值
- 时,CPU 必须直接跳入此值所指向的地址。
+ 时,CPU 必须跳入此值所指向的地址。此值为一个单独的 64 位小端值,
+ 因此 CPU 须在跳转前将所读取的值转换为其本身的端模式。
+
+- enable-method 为 “psci” 的 CPU 保持在内核外(比如,在
+ memory 节点中描述为内核空间的内存区外,或在通过设备树 /memreserve/
+ 域中描述为内核保留区的空间中)。内核将会发起在 ARM 文档(编号
+ ARM DEN 0022A:用于 ARM 上的电源状态协调接口系统软件)中描述的
+ CPU_ON 调用来将 CPU 带入内核。
+
+ *译者注:到文档翻译时,此文档已更新为 ARM DEN 0022B。
+
+ 设备树必须包含一个 ‘psci’ 节点,请参考以下文档:
+ Documentation/devicetree/bindings/arm/psci.txt
+
- 辅助 CPU 通用寄存器设置
x0 = 0 (保留,将来可能使用)
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt
index a5f6283829f9..a782704c1cb5 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/zh_CN/arm64/memory.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
or if there is a problem with the translation.
Maintainer: Catalin Marinas <catalin.marinas@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
---------------------------------------------------------------------
Documentation/arm64/memory.txt 的中文翻译
@@ -16,9 +16,9 @@ Documentation/arm64/memory.txt 的中文翻译
译存在问题,请联系中文版维护者。
英文版维护者: Catalin Marinas <catalin.marinas@arm.com>
-中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
以下为正文
---------------------------------------------------------------------
@@ -41,7 +41,7 @@ AArch64 Linux 使用页大小为 4KB 的 3 级转换表配置,对于用户和内
TTBR1 中,且从不写入 TTBR0。
-AArch64 Linux 内存布局:
+AArch64 Linux 在页大小为 4KB 时的内存布局:
起始地址 结束地址 大小 用途
-----------------------------------------------------------------------
@@ -55,15 +55,42 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
ffffffbe00000000 ffffffbffbbfffff ~8GB [防护页,未来用于 vmmemap]
+ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk 设备
+
ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O 空间
-ffffffbbffff0000 ffffffbcffffffff ~2MB [防护页]
+ffffffbffbe10000 ffffffbcffffffff ~2MB [防护页]
ffffffbffc000000 ffffffbfffffffff 64MB 模块
ffffffc000000000 ffffffffffffffff 256GB 内核逻辑内存映射
+AArch64 Linux 在页大小为 64KB 时的内存布局:
+
+起始地址 结束地址 大小 用途
+-----------------------------------------------------------------------
+0000000000000000 000003ffffffffff 4TB 用户空间
+
+fffffc0000000000 fffffdfbfffeffff ~2TB vmalloc
+
+fffffdfbffff0000 fffffdfbffffffff 64KB [防护页]
+
+fffffdfc00000000 fffffdfdffffffff 8GB vmemmap
+
+fffffdfe00000000 fffffdfffbbfffff ~8GB [防护页,未来用于 vmmemap]
+
+fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk 设备
+
+fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O 空间
+
+fffffdfffbe10000 fffffdfffbffffff ~2MB [防护页]
+
+fffffdfffc000000 fffffdffffffffff 64MB 模块
+
+fffffe0000000000 ffffffffffffffff 2TB 内核逻辑内存映射
+
+
4KB 页大小的转换表查找:
+--------+--------+--------+--------+--------+--------+--------+--------+
@@ -91,3 +118,10 @@ ffffffc000000000 ffffffffffffffff 256GB 内核逻辑内存映射
| | +--------------------------> [41:29] L2 索引 (仅使用 38:29 )
| +-------------------------------> [47:42] L1 索引 (未使用)
+-------------------------------------------------> [63] TTBR0/1
+
+当使用 KVM 时, 管理程序(hypervisor)在 EL2 中通过相对内核虚拟地址的
+一个固定偏移来映射内核页(内核虚拟地址的高 24 位设为零):
+
+起始地址 结束地址 大小 用途
+-----------------------------------------------------------------------
+0000004000000000 0000007fffffffff 256GB 在 HYP 中映射的内核对象
diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/zh_CN/arm64/tagged-pointers.txt
new file mode 100644
index 000000000000..2664d1bd5a1c
--- /dev/null
+++ b/Documentation/zh_CN/arm64/tagged-pointers.txt
@@ -0,0 +1,52 @@
+Chinese translated version of Documentation/arm64/tagged-pointers.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Will Deacon <will.deacon@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+---------------------------------------------------------------------
+Documentation/arm64/tagged-pointers.txt 的中文翻译
+
+如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
+交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
+译存在问题,请联系中文版维护者。
+
+英文版维护者: Will Deacon <will.deacon@arm.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
+
+以下为正文
+---------------------------------------------------------------------
+ Linux 在 AArch64 中带标记的虚拟地址
+ =================================
+
+作者: Will Deacon <will.deacon@arm.com>
+日期: 2013 年 06 月 12 日
+
+本文档简述了在 AArch64 地址转换系统中提供的带标记的虚拟地址及其在
+AArch64 Linux 中的潜在用途。
+
+内核提供的地址转换表配置使通过 TTBR0 完成的虚拟地址转换(即用户空间
+映射),其虚拟地址的最高 8 位(63:56)会被转换硬件所忽略。这种机制
+让这些位可供应用程序自由使用,其注意事项如下:
+
+ (1) 内核要求所有传递到 EL1 的用户空间地址带有 0x00 标记。
+ 这意味着任何携带用户空间虚拟地址的系统调用(syscall)
+ 参数 *必须* 在陷入内核前使它们的最高字节被清零。
+
+ (2) 非零标记在传递信号时不被保存。这意味着在应用程序中利用了
+ 标记的信号处理函数无法依赖 siginfo_t 的用户空间虚拟
+ 地址所携带的包含其内部域信息的标记。此规则的一个例外是
+ 当信号是在调试观察点的异常处理程序中产生的,此时标记的
+ 信息将被保存。
+
+ (3) 当使用带标记的指针时需特别留心,因为仅对两个虚拟地址
+ 的高字节,C 编译器很可能无法判断它们是不同的。
+
+此构架会阻止对带标记的 PC 指针的利用,因此在异常返回时,其高字节
+将被设置成一个为 “55” 的扩展符。
diff --git a/MAINTAINERS b/MAINTAINERS
index f6bc6184ed6f..b0196ab3e489 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1845,6 +1845,12 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
+BROADCOM GENET ETHERNET DRIVER
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/broadcom/genet/
+
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M: Michael Chan <mchan@broadcom.com>
L: netdev@vger.kernel.org
@@ -2367,7 +2373,7 @@ F: include/linux/cpufreq.h
CPU FREQUENCY DRIVERS - ARM BIG LITTLE
M: Viresh Kumar <viresh.kumar@linaro.org>
-M: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+M: Sudeep Holla <sudeep.holla@arm.com>
L: cpufreq@vger.kernel.org
L: linux-pm@vger.kernel.org
W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
@@ -2857,7 +2863,7 @@ M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
Q: http://patchwork.freedesktop.org/project/intel-gfx/
-T: git git://people.freedesktop.org/~danvet/drm-intel
+T: git git://anongit.freedesktop.org/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
F: include/drm/i915*
@@ -3324,6 +3330,17 @@ S: Maintained
F: include/linux/netfilter_bridge/
F: net/bridge/
+ETHERNET PHY LIBRARY
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: include/linux/phy.h
+F: include/linux/phy_fixed.h
+F: drivers/net/phy/
+F: Documentation/networking/phy.txt
+F: drivers/of/of_mdio.c
+F: drivers/of/of_net.c
+
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.cz>
L: linux-ext4@vger.kernel.org
diff --git a/Makefile b/Makefile
index 933e1def6baf..893d6f0e875b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b9d6a8b485e0..6d1e43d46187 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -38,6 +38,7 @@ dtb-$(CONFIG_ARCH_AT91) += at91sam9g35ek.dtb
dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb
dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb
# sama5d3
+dtb-$(CONFIG_ARCH_AT91) += at91-sama5d3_xplained.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
new file mode 100644
index 000000000000..ce1375595e5f
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -0,0 +1,229 @@
+/*
+ * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board
+ *
+ * Copyright (C) 2014 Atmel,
+ * 2014 Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+/dts-v1/;
+#include "sama5d36.dtsi"
+
+/ {
+ model = "SAMA5D3 Xplained";
+ compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ reg = <0x20000000 0x10000000>;
+ };
+
+ ahb {
+ apb {
+ mmc0: mmc@f0000000 {
+ pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>;
+ status = "okay";
+ slot@0 {
+ reg = <0>;
+ bus-width = <8>;
+ cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ spi0: spi@f0004000 {
+ cs-gpios = <&pioD 13 0>;
+ status = "okay";
+ };
+
+ can0: can@f000c000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@f0014000 {
+ status = "okay";
+ };
+
+ i2c1: i2c@f0018000 {
+ status = "okay";
+ };
+
+ macb0: ethernet@f0028000 {
+ phy-mode = "rgmii";
+ status = "okay";
+ };
+
+ usart0: serial@f001c000 {
+ status = "okay";
+ };
+
+ usart1: serial@f0020000 {
+ pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>;
+ status = "okay";
+ };
+
+ uart0: serial@f0024000 {
+ status = "okay";
+ };
+
+ mmc1: mmc@f8000000 {
+ pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>;
+ status = "okay";
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ spi1: spi@f8008000 {
+ cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+ status = "okay";
+ };
+
+ adc0: adc@f8018000 {
+ pinctrl-0 = <
+ &pinctrl_adc0_adtrg
+ &pinctrl_adc0_ad0
+ &pinctrl_adc0_ad1
+ &pinctrl_adc0_ad2
+ &pinctrl_adc0_ad3
+ &pinctrl_adc0_ad4
+ &pinctrl_adc0_ad5
+ &pinctrl_adc0_ad6
+ &pinctrl_adc0_ad7
+ &pinctrl_adc0_ad8
+ &pinctrl_adc0_ad9
+ >;
+ status = "okay";
+ };
+
+ i2c2: i2c@f801c000 {
+ dmas = <0>, <0>; /* Do not use DMA for i2c2 */
+ status = "okay";
+ };
+
+ macb1: ethernet@f802c000 {
+ phy-mode = "rmii";
+ status = "okay";
+ };
+
+ dbgu: serial@ffffee00 {
+ status = "okay";
+ };
+
+ pinctrl@fffff200 {
+ board {
+ pinctrl_mmc0_cd: mmc0_cd {
+ atmel,pins =
+ <AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+
+ pinctrl_mmc1_cd: mmc1_cd {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+
+ pinctrl_usba_vbus: usba_vbus {
+ atmel,pins =
+ <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */
+ };
+ };
+ };
+
+ pmc: pmc@fffffc00 {
+ main: mainck {
+ clock-frequency = <12000000>;
+ };
+ };
+ };
+
+ nand0: nand@60000000 {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "hw";
+ atmel,has-pmecc;
+ atmel,pmecc-cap = <4>;
+ atmel,pmecc-sector-size = <512>;
+ nand-on-flash-bbt;
+ status = "okay";
+
+ at91bootstrap@0 {
+ label = "at91bootstrap";
+ reg = <0x0 0x40000>;
+ };
+
+ bootloader@40000 {
+ label = "bootloader";
+ reg = <0x40000 0x80000>;
+ };
+
+ bootloaderenv@c0000 {
+ label = "bootloader env";
+ reg = <0xc0000 0xc0000>;
+ };
+
+ dtb@180000 {
+ label = "device tree";
+ reg = <0x180000 0x80000>;
+ };
+
+ kernel@200000 {
+ label = "kernel";
+ reg = <0x200000 0x600000>;
+ };
+
+ rootfs@800000 {
+ label = "rootfs";
+ reg = <0x800000 0x0f800000>;
+ };
+ };
+
+ usb0: gadget@00500000 {
+ atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>; /* PE9, conflicts with A9 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usba_vbus>;
+ status = "okay";
+ };
+
+ usb1: ohci@00600000 {
+ num-ports = <3>;
+ atmel,vbus-gpio = <0
+ &pioE 3 GPIO_ACTIVE_LOW
+ &pioE 4 GPIO_ACTIVE_LOW
+ >;
+ status = "okay";
+ };
+
+ usb2: ehci@00700000 {
+ status = "okay";
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ bp3 {
+ label = "PB_USER";
+ gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
+ linux,code = <0x104>;
+ gpio-key,wakeup;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ d2 {
+ label = "d2";
+ gpios = <&pioE 23 GPIO_ACTIVE_LOW>; /* PE23, conflicts with A23, CTS2 */
+ linux,default-trigger = "heartbeat";
+ };
+
+ d3 {
+ label = "d3";
+ gpios = <&pioE 24 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 0042f73068b0..fece8665fb63 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -523,7 +523,7 @@
};
i2c0: i2c@fff88000 {
- compatible = "atmel,at91sam9263-i2c";
+ compatible = "atmel,at91sam9260-i2c";
reg = <0xfff88000 0x100>;
interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index e9487f6f0166..924a6a6ffd0f 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -124,6 +124,10 @@
nand-on-flash-bbt;
status = "okay";
};
+
+ usb0: ohci@00500000 {
+ status = "okay";
+ };
};
leds {
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index fd8fc7cd53f3..5bfae54fb780 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -52,12 +52,6 @@
};
};
- codec: spdif-transmitter {
- compatible = "linux,spdif-dit";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hummingboard_spdif>;
- };
-
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@@ -111,7 +105,7 @@
};
pinctrl_hummingboard_spdif: hummingboard-spdif {
- fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
+ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
@@ -142,6 +136,8 @@
};
&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_spdif>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 64daa3b311f6..c2a24888a276 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -46,12 +46,6 @@
};
};
- codec: spdif-transmitter {
- compatible = "linux,spdif-dit";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_cubox_i_spdif>;
- };
-
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@@ -89,7 +83,7 @@
};
pinctrl_cubox_i_spdif: cubox-i-spdif {
- fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
+ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
@@ -121,6 +115,8 @@
};
&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_spdif>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 52447c17537a..3d5faf85f51b 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1228,7 +1228,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+ clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
<&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 0c1e8d871ed1..6cb9b68e2188 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -188,7 +188,6 @@
msp2: msp@80117000 {
pinctrl-names = "default";
pinctrl-0 = <&msp2_default_mode>;
- status = "okay";
};
msp3: msp@80125000 {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 845bc745706b..ee6982976d66 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -29,6 +29,7 @@ CONFIG_ARCH_OMAP3=y
CONFIG_ARCH_OMAP4=y
CONFIG_SOC_OMAP5=y
CONFIG_SOC_AM33XX=y
+CONFIG_SOC_DRA7XX=y
CONFIG_SOC_AM43XX=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_SOCFPGA=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe0284e..8b8b61685a34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb();
}
/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7eeddf..85c60adc8b60 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
/*
* 2nd stage PTE definitions for LPAE.
*/
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
+#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c6072aa45..ac4bfae26702 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
static inline void dsb_sev(void)
{
-#if __LINUX_ARM_ARCH__ >= 7
- __asm__ __volatile__ (
- "dsb ishst\n"
- SEV
- );
-#else
- __asm__ __volatile__ (
- "mcr p15, 0, %0, c7, c10, 4\n"
- SEV
- : : "r" (0)
- );
-#endif
+
+ dsb(ishst);
+ __asm__(SEV);
}
/*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b0df9761de6d..1e8b030dbefd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
kernel_data.end = virt_to_phys(_end - 1);
for_each_memblock(memory, region) {
- res = memblock_virt_alloc_low(sizeof(*res), 0);
+ res = memblock_virt_alloc(sizeof(*res), 0);
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index 8f4649b301b2..1abae5f6a418 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -8,7 +8,7 @@ config ARCH_HI3xxx
select CLKSRC_OF
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
- select HAVE_ARM_TWD
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select PINCTRL
select PINCTRL_SINGLE
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index af2e582d2b74..4d677f442539 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -482,6 +482,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
if (IS_ENABLED(CONFIG_PCI_IMX6))
clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
base = of_iomap(np, 0);
WARN_ON(!base);
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index 3781a1853998..4c86f3035205 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -266,6 +266,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
/* Audio-related clocks configuration */
clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]);
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt");
base = of_iomap(np, 0);
WARN_ON(!base);
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index 9d47adc078aa..7a9b98589db7 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -236,8 +236,6 @@ void __init imx6q_pm_init(void)
regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
IMX6Q_GPR1_GINT);
- /* Set initial power mode */
- imx6q_set_lpm(WAIT_CLOCKED);
suspend_set_ops(&imx6q_pm_ops);
}
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index ba470d64493b..3795ae28a613 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -2,7 +2,6 @@ config ARCH_MOXART
bool "MOXA ART SoC" if ARCH_MULTI_V4T
select CPU_FA526
select ARM_DMA_MEM_BUFFERABLE
- select DMA_OF
select USE_OF
select CLKSRC_OF
select CLKSRC_MMIO
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 653b489479e0..e2ce4f8366a7 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -54,7 +54,7 @@ config SOC_OMAP5
select ARM_GIC
select CPU_V7
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select HAVE_ARM_ARCH_TIMER
select ARM_ERRATA_798181 if SMP
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c
index c9f309ae88c5..8b90c4f2d430 100644
--- a/arch/arm/mach-pxa/am300epd.c
+++ b/arch/arm/mach-pxa/am300epd.c
@@ -30,6 +30,7 @@
#include <mach/gumstix.h>
#include <mach/mfp-pxa25x.h>
+#include <mach/irqs.h>
#include <linux/platform_data/video-pxafb.h>
#include "generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h
index 954641e6c8b1..1b0825911e62 100644
--- a/arch/arm/mach-pxa/include/mach/balloon3.h
+++ b/arch/arm/mach-pxa/include/mach/balloon3.h
@@ -14,6 +14,8 @@
#ifndef ASM_ARCH_BALLOON3_H
#define ASM_ARCH_BALLOON3_H
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
+
enum balloon3_features {
BALLOON3_FEATURE_OHCI,
BALLOON3_FEATURE_MMC,
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h
index f3c3493b468d..c030d955bbd7 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/include/mach/corgi.h
@@ -13,6 +13,7 @@
#ifndef __ASM_ARCH_CORGI_H
#define __ASM_ARCH_CORGI_H 1
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
/*
* Corgi (Non Standard) GPIO Definitions
diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h
index 2628e7b72116..00cfbbbf73f7 100644
--- a/arch/arm/mach-pxa/include/mach/csb726.h
+++ b/arch/arm/mach-pxa/include/mach/csb726.h
@@ -11,6 +11,8 @@
#ifndef CSB726_H
#define CSB726_H
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
#define CSB726_GPIO_IRQ_LAN 52
#define CSB726_GPIO_IRQ_SM501 53
#define CSB726_GPIO_MMC_DETECT 100
diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h
index dba14b6503ad..f7df27bbb42e 100644
--- a/arch/arm/mach-pxa/include/mach/gumstix.h
+++ b/arch/arm/mach-pxa/include/mach/gumstix.h
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/* BTRESET - Reset line to Bluetooth module, active low signal. */
#define GPIO_GUMSTIX_BTRESET 7
diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h
index 22a96f87232b..7e63f4680271 100644
--- a/arch/arm/mach-pxa/include/mach/idp.h
+++ b/arch/arm/mach-pxa/include/mach/idp.h
@@ -23,6 +23,7 @@
* IDP hardware.
*/
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
#define IDP_FLASH_PHYS (PXA_CS0_PHYS)
#define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS)
diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h
index 2c4471336570..b184f296023b 100644
--- a/arch/arm/mach-pxa/include/mach/palmld.h
+++ b/arch/arm/mach-pxa/include/mach/palmld.h
@@ -13,6 +13,8 @@
#ifndef _INCLUDE_PALMLD_H_
#define _INCLUDE_PALMLD_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h
index 0bd4f036c72f..e342c5921405 100644
--- a/arch/arm/mach-pxa/include/mach/palmt5.h
+++ b/arch/arm/mach-pxa/include/mach/palmt5.h
@@ -15,6 +15,8 @@
#ifndef _INCLUDE_PALMT5_H_
#define _INCLUDE_PALMT5_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h
index c383a21680b6..81c727b3cfd2 100644
--- a/arch/arm/mach-pxa/include/mach/palmtc.h
+++ b/arch/arm/mach-pxa/include/mach/palmtc.h
@@ -16,6 +16,8 @@
#ifndef _INCLUDE_PALMTC_H_
#define _INCLUDE_PALMTC_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h
index f2e530380253..92bc1f05300d 100644
--- a/arch/arm/mach-pxa/include/mach/palmtx.h
+++ b/arch/arm/mach-pxa/include/mach/palmtx.h
@@ -16,6 +16,8 @@
#ifndef _INCLUDE_PALMTX_H_
#define _INCLUDE_PALMTX_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h
index 6bf28de228bd..86ebd7b6c960 100644
--- a/arch/arm/mach-pxa/include/mach/pcm027.h
+++ b/arch/arm/mach-pxa/include/mach/pcm027.h
@@ -23,6 +23,8 @@
* Definitions of CPU card resources only
*/
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/* phyCORE-PXA270 (PCM027) Interrupts */
#define PCM027_IRQ(x) (IRQ_BOARD_START + (x))
#define PCM027_BTDET_IRQ PCM027_IRQ(0)
diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
index 0260aaa2fc17..7e544c14967e 100644
--- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
+++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
@@ -20,6 +20,7 @@
*/
#include <mach/pcm027.h>
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/*
* definitions relevant only when the PCM-990
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h
index f32ff75dcca8..b56b19351a03 100644
--- a/arch/arm/mach-pxa/include/mach/poodle.h
+++ b/arch/arm/mach-pxa/include/mach/poodle.h
@@ -15,6 +15,8 @@
#ifndef __ASM_ARCH_POODLE_H
#define __ASM_ARCH_POODLE_H 1
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/*
* GPIOs
*/
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h
index 0bfe6507c95d..25c9f62e46aa 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/include/mach/spitz.h
@@ -15,8 +15,8 @@
#define __ASM_ARCH_SPITZ_H 1
#endif
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */
#include <linux/fb.h>
-#include <linux/gpio.h>
/* Spitz/Akita GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index 2bb0e862598c..0497d95cef25 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -13,6 +13,8 @@
#ifndef _ASM_ARCH_TOSA_H_
#define _ASM_ARCH_TOSA_H_ 1
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
+
/* TOSA Chip selects */
#define TOSA_LCDC_PHYS PXA_CS4_PHYS
/* Internal Scoop */
diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h
index d2ca01053f69..ae3ca013afab 100644
--- a/arch/arm/mach-pxa/include/mach/trizeps4.h
+++ b/arch/arm/mach-pxa/include/mach/trizeps4.h
@@ -10,6 +10,8 @@
#ifndef _TRIPEPS4_H_
#define _TRIPEPS4_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/* physical memory regions */
#define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */
#define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 338640631e08..05fa505df585 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -8,7 +8,7 @@ config ARCH_SHMOBILE_MULTI
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select ARM_GIC
select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 1db2a5ca9ab8..8c09a8393fb6 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -25,6 +25,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of.h>
+#include <linux/memblock.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
@@ -41,6 +42,18 @@
void __iomem *zynq_scu_base;
+/**
+ * zynq_memory_init - Initialize special memory
+ *
+ * We need to stop things allocating the low memory as DMA can't work in
+ * the 1st 512K of memory.
+ */
+static void __init zynq_memory_init(void)
+{
+ if (!__pa(PAGE_OFFSET))
+ memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
+}
+
static struct platform_device zynq_cpuidle_device = {
.name = "cpuidle-zynq",
};
@@ -117,5 +130,6 @@ DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
.init_machine = zynq_init_machine,
.init_time = zynq_timer_init,
.dt_compat = zynq_dt_match,
+ .reserve = zynq_memory_init,
.restart = zynq_system_reset,
MACHINE_END
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d15a88..7ea641b7aa7d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
+ pteval_t prot_pte_s2;
pmdval_t prot_l1;
pmdval_t prot_sect;
unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f08c133cc25..a623cb3ad012 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -232,12 +232,16 @@ __setup("noalign", noalign_setup);
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
+ .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
+ s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@@ -508,7 +512,8 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
s2_pgprot = cp->pte_s2;
- hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
* ARMv6 and above have extended page tables.
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 45dc29f85d56..32b3558321c4 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -208,7 +208,6 @@ __v6_setup:
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@@ -218,6 +217,8 @@ __v6_setup:
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
+ @ complete invalidations
adr r5, v6_crval
ldmia r5, {r5, r6}
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index bd1781979a39..74f6033e76dd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -351,7 +351,6 @@ __v7_setup:
4: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
- dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@@ -360,6 +359,7 @@ __v7_setup:
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
+ dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f84a61..eaf54a30bedc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
+#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
+#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 22fb66590dcd..dba48a5d5bb9 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -11,7 +11,7 @@ all: uImage vmlinux.elf
KBUILD_DEFCONFIG := atstk1002_defconfig
-KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
+KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
KBUILD_AFLAGS += -mrelax -mno-pic
KBUILD_CFLAGS_MODULE += -mno-relax
LDFLAGS_vmlinux += --relax
diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
index 9764a1a1073e..c1466a872b9c 100644
--- a/arch/avr32/boards/mimc200/fram.c
+++ b/arch/avr32/boards/mimc200/fram.c
@@ -11,6 +11,7 @@
#define FRAM_VERSION "1.0"
#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/io.h>
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index cfb9fe1b8df9..c7c64a63c29f 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -17,5 +17,6 @@ generic-y += scatterlist.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += vga.h
generic-y += xor.h
generic-y += hash.h
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index fc6483f83ccc..4f5ec2bb7172 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr);
#define iounmap(addr) \
__iounmap(addr)
+#define ioremap_wc ioremap_nocache
+
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e27e9ad6818e..150866b2a3fe 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
}
extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 9e39ceb1d19f..d4dd41fb951b 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -172,10 +172,20 @@ struct eeh_ops {
};
extern struct eeh_ops *eeh_ops;
-extern int eeh_subsystem_enabled;
+extern bool eeh_subsystem_enabled;
extern raw_spinlock_t confirm_error_lock;
extern int eeh_probe_mode;
+static inline bool eeh_enabled(void)
+{
+ return eeh_subsystem_enabled;
+}
+
+static inline void eeh_set_enable(bool mode)
+{
+ eeh_subsystem_enabled = mode;
+}
+
#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
@@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
-#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
+#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
/*
* Reads from a device which has been isolated by EEH will return
@@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *);
#else /* !CONFIG_EEH */
+static inline bool eeh_enabled(void)
+{
+ return false;
+}
+
+static inline void eeh_set_enable(bool mode) { }
+
static inline int eeh_init(void)
{
return 0;
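
A minimal user-space sketch of the accessor pattern this eeh.h hunk introduces: the raw eeh_subsystem_enabled flag is wrapped by eeh_enabled()/eeh_set_enable(), with no-op stubs when the feature is compiled out. The _demo names and the plain DEMO_CONFIG_EEH switch below are stand-ins; only the wrapper shape mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_CONFIG_EEH 1              /* set to 0 to mimic the !CONFIG_EEH build */

#if DEMO_CONFIG_EEH
static bool eeh_subsystem_enabled_demo;

static inline bool eeh_enabled_demo(void)
{
        return eeh_subsystem_enabled_demo;
}

static inline void eeh_set_enable_demo(bool mode)
{
        eeh_subsystem_enabled_demo = mode;
}
#else
static inline bool eeh_enabled_demo(void) { return false; }
static inline void eeh_set_enable_demo(bool mode) { (void)mode; }
#endif

int main(void)
{
        eeh_set_enable_demo(true);     /* platforms flip this at probe time */
        printf("EEH enabled: %d\n", eeh_enabled_demo());
        return 0;
}
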
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index d750336b171d..623f2971ce0e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
- return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
+ return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
return __pte(pte_update(ptep, ~0UL, 0));
#endif
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f7a8036579b5..42632c7a2a4e 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -77,6 +77,7 @@ struct iommu_table {
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
+ void (*set_bypass)(struct iommu_table *tbl, bool enable);
};
/* Pure 2^n version of get_order */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index bc141c950b1e..eb9261024f51 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long clr,
+ unsigned long set,
int huge)
{
#ifdef PTE_ATOMIC_UPDATES
@@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+ : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
unsigned long old = pte_val(*ptep);
- *ptep = __pte(old & ~clr);
+ *ptep = __pte((old & ~clr) | set);
#endif
/* huge pages use the old page table lock */
if (!huge)
@@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 0);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 1);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
/*
@@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
return __pte(old);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t * ptep)
{
- pte_update(mm, addr, ptep, ~0UL, 0);
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
@@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,
extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
unsigned long addr,
- pmd_t *pmdp, unsigned long clr);
+ pmd_t *pmdp,
+ unsigned long clr,
+ unsigned long set);
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
@@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
- old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
+ old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
}
@@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
return;
- pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
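
The new fourth argument turns pte_update() into a combined clear-and-set primitive, which is what ptep_set_numa()/pmdp_set_numa() in the next file rely on. Below is a user-space sketch of just the non-atomic fallback transformation, (old & ~clr) | set, with placeholder flag values rather than the real PTE bits.

#include <stdio.h>

#define DEMO_PAGE_PRESENT 0x001UL      /* placeholder, not the real bit */
#define DEMO_PAGE_NUMA    0x008UL      /* placeholder, not the real bit */

/* mirrors the !PTE_ATOMIC_UPDATES path: clear 'clr', then set 'set' */
static unsigned long pte_update_demo(unsigned long *ptep,
                                     unsigned long clr, unsigned long set)
{
        unsigned long old = *ptep;

        *ptep = (old & ~clr) | set;
        return old;
}

int main(void)
{
        unsigned long pte = DEMO_PAGE_PRESENT | 0x100UL;

        /* ptep_set_numa(): drop PRESENT and raise NUMA in one update */
        pte_update_demo(&pte, DEMO_PAGE_PRESENT, DEMO_PAGE_NUMA);
        printf("pte = %#lx\n", pte);    /* prints 0x108 */
        return 0;
}
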
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index f83b6f3e1b39..3ebb188c3ff5 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte;
}
+#define ptep_set_numa ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
+ return;
+}
+
#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
return pte_numa(pmd_pte(pmd));
}
+#define pmdp_set_numa pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
+ return;
+}
+
#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4ee06fe15de4..d0e784e0ff48 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,6 +8,7 @@
#ifdef __powerpc64__
+extern char __start_interrupts[];
extern char __end_interrupts[];
extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr)
return 0;
}
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+ unsigned long end)
+{
+ unsigned long real_start, real_end;
+ real_start = __start_interrupts - _stext;
+ real_end = __end_interrupts - _stext;
+
+ return start < (unsigned long)__va(real_end) &&
+ (unsigned long)__va(real_start) < end;
+}
+
static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
{
return start < (unsigned long)__init_end &&
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 0d9cecddf8a4..c53f5f6d1761 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -4,11 +4,11 @@
#ifdef __KERNEL__
/* Default link addresses for the vDSOs */
-#define VDSO32_LBASE 0x100000
-#define VDSO64_LBASE 0x100000
+#define VDSO32_LBASE 0x0
+#define VDSO64_LBASE 0x0
/* Default map addresses for 32bit vDSO */
-#define VDSO32_MBASE VDSO32_LBASE
+#define VDSO32_MBASE 0x100000
#define VDSO_VERSION_STRING LINUX_2.6.15
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97ccdcb..ee78f6e49d64 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- if (ppc_md.dma_set_mask)
- return ppc_md.dma_set_mask(dev, dma_mask);
if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
*dev->dma_mask = dma_mask;
return 0;
}
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (ppc_md.dma_set_mask)
+ return ppc_md.dma_set_mask(dev, dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
EXPORT_SYMBOL(dma_set_mask);
u64 dma_get_required_mask(struct device *dev)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 148db72a8c43..e7b76a6bf150 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
+#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
@@ -89,7 +90,7 @@
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
-int eeh_subsystem_enabled;
+bool eeh_subsystem_enabled = false;
EXPORT_SYMBOL(eeh_subsystem_enabled);
/*
@@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.total_mmio_ffs++;
- if (!eeh_subsystem_enabled)
+ if (!eeh_enabled())
return 0;
if (!edev) {
@@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name)
return -EEXIST;
}
+static int eeh_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ eeh_set_enable(false);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block eeh_reboot_nb = {
+ .notifier_call = eeh_reboot_notifier,
+};
+
/**
* eeh_init - EEH initialization
*
@@ -778,6 +790,14 @@ int eeh_init(void)
if (machine_is(powernv) && cnt++ <= 0)
return ret;
+ /* Register reboot notifier */
+ ret = register_reboot_notifier(&eeh_reboot_nb);
+ if (ret) {
+ pr_warn("%s: Failed to register notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
/* call platform initialization function */
if (!eeh_ops) {
pr_warning("%s: Platform EEH operation not found\n",
@@ -822,7 +842,7 @@ int eeh_init(void)
return ret;
}
- if (eeh_subsystem_enabled)
+ if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
pr_warning("EEH: No capable adapters found\n");
@@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev)
struct device_node *dn;
struct eeh_dev *edev;
- if (!dev || !eeh_subsystem_enabled)
+ if (!dev || !eeh_enabled())
return;
pr_debug("EEH: Adding device %s\n", pci_name(dev));
@@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
- if (!dev || !eeh_subsystem_enabled)
+ if (!dev || !eeh_enabled())
return;
edev = pci_dev_to_eeh_dev(dev);
@@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev)
static int proc_eeh_show(struct seq_file *m, void *v)
{
- if (0 == eeh_subsystem_enabled) {
+ if (!eeh_enabled()) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
} else {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7bb30dca4e19..fdc679d309ec 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
*/
if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
return NULL;
+
driver = eeh_pcid_get(dev);
- if (driver && driver->err_handler)
- return NULL;
+ if (driver) {
+ eeh_pcid_put(dev);
+ if (driver->err_handler)
+ return NULL;
+ }
/* Remove it from PCI subsystem */
pr_debug("EEH: Removing %s without EEH sensitive driver\n",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d773dd440a45..88e3ec6e1d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
memset(tbl->it_map, 0xff, sz);
iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
+ /*
+ * Disable iommu bypass, otherwise the user can DMA to all of
+ * our physical memory via the bypass window instead of just
+ * the pages that have been explicitly mapped into the iommu
+ */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl)
/* Restore bit#0 set by iommu_init_table() */
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
+
+ /* The kernel owns the device now, we can restore the iommu bypass */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, true);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9729b23bfb0a..1d0848bba049 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void)
#ifdef CONFIG_PPC64
cpu_nr = i;
#else
+#ifdef CONFIG_SMP
cpu_nr = get_hard_smp_processor_id(i);
+#else
+ cpu_nr = 0;
#endif
+#endif
+
memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
tp = critirq_ctx[cpu_nr];
tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f7340da8..015ae55c1868 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
/* Values we need to export to the second kernel via the device tree. */
static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
static struct property kernel_end_prop = {
.name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
.length = sizeof(phys_addr_t),
- .value = &crashk_res.start,
+ .value = &crashk_base
};
static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
static struct property memory_limit_prop = {
.name = "linux,memory-limit",
.length = sizeof(unsigned long long),
- .value = &memory_limit,
+ .value = &mem_limit,
};
+#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
+
static void __init export_crashk_values(struct device_node *node)
{
struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
of_remove_property(node, prop);
if (crashk_res.start != 0) {
+ crashk_base = cpu_to_be_ulong(crashk_res.start);
of_add_property(node, &crashk_base_prop);
- crashk_size = resource_size(&crashk_res);
+ crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
of_add_property(node, &crashk_size_prop);
}
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
* memory_limit is required by the kexec-tools to limit the
* crash regions to the actual memory used.
*/
+ mem_limit = cpu_to_be_ulong(memory_limit);
of_update_property(node, &memory_limit_prop);
}
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
of_remove_property(node, prop);
/* information needed by userspace when using default_machine_kexec */
- kernel_end = __pa(_end);
+ kernel_end = cpu_to_be_ulong(__pa(_end));
of_add_property(node, &kernel_end_prop);
export_crashk_values(node);
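
cpu_to_be_ulong pastes BITS_PER_LONG onto cpu_to_be, so it resolves to cpu_to_be32 or cpu_to_be64 at preprocessing time. A user-space sketch of that trick is below; it assumes __PASTE is the kernel's usual two-level a##b macro, and GCC/Clang byte-swap builtins stand in for the real conversion helpers.

#include <stdio.h>
#include <stdint.h>

#define ___PASTE(a, b) a##b
#define __PASTE(a, b)  ___PASTE(a, b)

#define DEMO_BITS_PER_LONG 64          /* pretend this is a 64-bit build */

static uint32_t cpu_to_be32(uint32_t v) { return __builtin_bswap32(v); }
static uint64_t cpu_to_be64(uint64_t v) { return __builtin_bswap64(v); }

/* expands to cpu_to_be64 here, and to cpu_to_be32 on a 32-bit build */
#define cpu_to_be_ulong __PASTE(cpu_to_be, DEMO_BITS_PER_LONG)

int main(void)
{
        printf("%#llx\n", (unsigned long long)cpu_to_be_ulong(0x1UL));
        return 0;
}
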
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d648f60..59d229a2a3e0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
+static unsigned long htab_size;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
- .value = &htab_size_bytes,
+ .value = &htab_size,
};
static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
if (prop)
of_remove_property(node, prop);
- htab_base = __pa(htab_address);
+ htab_base = cpu_to_be64(__pa(htab_address));
of_add_property(node, &htab_base_prop);
+ htab_size = cpu_to_be64(htab_size_bytes);
of_add_property(node, &htab_size_prop);
of_node_put(node);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 879f09620f83..7c6bb4b17b49 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
+/*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+ */
_GLOBAL(call_do_irq)
mflr r0
stw r0,4(r1)
lwz r10,THREAD+KSP_LIMIT(r2)
- addi r11,r3,THREAD_INFO_GAP
+ addi r11,r4,THREAD_INFO_GAP
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
mr r1,r4
stw r10,8(r1)
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..1482327cfeba 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@ _GLOBAL(relocate)
* R_PPC64_RELATIVE ones.
*/
mtctr r8
-5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */
- cmpwi r0,R_PPC64_RELATIVE
+5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
+ cmpdi r0,R_PPC64_RELATIVE
bne 6f
ld r6,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 2b0da27eaee4..04cc4fcca78b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void)
/* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
hw_cpu = get_hard_smp_processor_id(i);
+#else
+ hw_cpu = 0;
+#endif
+
critirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 79683d0393f5..6ac107ac402a 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -6,7 +6,7 @@
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
- .incbin "arch/powerpc/kernel/vdso32/vdso32.so"
+ .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
.balign PAGE_SIZE
vdso32_end:
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 8df9e2463007..df60fca6a13d 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -6,7 +6,7 @@
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
- .incbin "arch/powerpc/kernel/vdso64/vdso64.so"
+ .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
.balign PAGE_SIZE
vdso64_end:
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index de6881259aef..d766d6ee33fe 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
+ /*
+ * If relocatable, check if it overlaps interrupt vectors that
+ * are copied down to real 0. For relocatable kernel
+ * (e.g. kdump case) we copy interrupt vectors down to real
+ * address 0. Mark that region as executable. This is
+ * because on p8 system with relocation on exception feature
+ * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
+ * in order to execute the interrupt handlers in virtual
+ * mode the vector region needs to be marked as executable.
+ */
+ if ((PHYSICAL_START > MEMORY_START) &&
+ overlaps_interrupt_vector_text(vaddr, vaddr + step))
+ tprot &= ~HPTE_R_N;
+
hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 65b7b65e8708..62bf5e8e78da 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
}
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, unsigned long clr)
+ pmd_t *pmdp, unsigned long clr,
+ unsigned long set)
{
unsigned long old, tmp;
@@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
- : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY)
+ : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
old = pmd_val(*pmdp);
- *pmdp = __pmd(old & ~clr);
+ *pmdp = __pmd((old & ~clr) | set);
#endif
if (old & _PAGE_HASHPTE)
hpte_do_hugepage_flush(mm, addr, pmdp);
@@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
- pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT);
+ pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
}
/*
@@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long old;
pgtable_t *pgtable_slot;
- old = pmd_hugepage_update(mm, addr, pmdp, ~0UL);
+ old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
/*
* We have pmd == none and we are holding page_table_lock.
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index a770df2dae70..6c0b1f5f8d2c 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
for (; npages > 0; --npages) {
- pte_update(mm, addr, pte, 0, 0);
+ pte_update(mm, addr, pte, 0, 0, 0);
addr += PAGE_SIZE;
++pte;
}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29b89e863d7c..67cf22083f4c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu)
mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
mb();
+ if (cpuhw->bhrb_users)
+ ppmu->config_bhrb(cpuhw->bhrb_filter);
+
write_mmcr0(cpuhw, mmcr0);
/*
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu)
}
out:
- if (cpuhw->bhrb_users)
- ppmu->config_bhrb(cpuhw->bhrb_filter);
local_irq_restore(flags);
}
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index a3f7abd2f13f..96cee20dcd34 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,37 @@
#define PM_BRU_FIN 0x10068
#define PM_BR_MPRED_CMPL 0x400f6
+/* All L1 D cache load references counted at finish, gated by reject */
+#define PM_LD_REF_L1 0x100ee
+/* Load Missed L1 */
+#define PM_LD_MISS_L1 0x3e054
+/* Store Missed L1 */
+#define PM_ST_MISS_L1 0x300f0
+/* L1 cache data prefetches */
+#define PM_L1_PREF 0x0d8b8
+/* Instruction fetches from L1 */
+#define PM_INST_FROM_L1 0x04080
+/* Demand iCache Miss */
+#define PM_L1_ICACHE_MISS 0x200fd
+/* Instruction Demand sectors written into IL1 */
+#define PM_L1_DEMAND_WRITE 0x0408c
+/* Instruction prefetch written into IL1 */
+#define PM_IC_PREF_WRITE 0x0408e
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define PM_DATA_FROM_L3 0x4c042
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+#define PM_DATA_FROM_L3MISS 0x300fe
+/* All successful D-side store dispatches for this thread */
+#define PM_L2_ST 0x17080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define PM_L2_ST_MISS 0x17082
+/* Total HW L3 prefetches (Load+store) */
+#define PM_L3_PREF_ALL 0x4e052
+/* Data PTEG reload */
+#define PM_DTLB_MISS 0x300fc
+/* ITLB Reloaded */
+#define PM_ITLB_MISS 0x400fc
+
/*
* Raw event encoding for POWER8:
@@ -557,6 +588,8 @@ static int power8_generic_events[] = {
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_PREF,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L2_ST,
+ [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+#undef C
+
static struct power_pmu power8_pmu = {
.name = "POWER8",
.n_counter = 6,
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = {
.flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
+ .cache_events = &power8_cache_events,
.attr_groups = power8_pmu_attr_groups,
.bhrb_nr = 32,
};
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index e1e71618b70c..f51474336460 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb,
/* We simply send special EEH event */
if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
- (events & OPAL_EVENT_PCI_ERROR))
+ (events & OPAL_EVENT_PCI_ERROR) &&
+ eeh_enabled())
eeh_send_failure_event(NULL);
return 0;
@@ -489,8 +490,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
- struct eeh_dev *edev;
- struct pci_dev *dev;
+ struct pci_bus *bus;
int ret;
/*
@@ -519,31 +519,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
if (pe->type & EEH_PE_PHB) {
ret = ioda_eeh_phb_reset(hose, option);
} else {
- if (pe->type & EEH_PE_DEVICE) {
- /*
- * If it's device PE, we didn't refer to the parent
- * PCI bus yet. So we have to figure it out indirectly.
- */
- edev = list_first_entry(&pe->edevs,
- struct eeh_dev, list);
- dev = eeh_dev_to_pci_dev(edev);
- dev = dev->bus->self;
- } else {
- /*
- * If it's bus PE, the parent PCI bus is already there
- * and just pick it up.
- */
- dev = pe->bus->self;
- }
-
- /*
- * Do reset based on the fact that the direct upstream bridge
- * is root bridge (port) or not.
- */
- if (dev->bus->number == 0)
+ bus = eeh_pe_bus_get(pe);
+ if (pci_is_root_bus(bus))
ret = ioda_eeh_root_reset(hose, option);
else
- ret = ioda_eeh_bridge_reset(hose, dev, option);
+ ret = ioda_eeh_bridge_reset(hose, bus->self, option);
}
return ret;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index a79fddc5e74e..a59788e83b8b 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Enable EEH explicitly so that we will do EEH check
* while accessing I/O stuff
*/
- eeh_subsystem_enabled = 1;
+ eeh_set_enable(true);
/* Save memory bars */
eeh_save_bars(edev);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7d6dcc6d5fa9..3b2b4fb3585b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
+#include <linux/memblock.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
return;
pe = &phb->ioda.pe_array[pdn->pe_number];
+ WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
}
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+ struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+ uint64_t top;
+ bool bypass = false;
+
+ if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+ return -ENODEV;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ if (pe->tce_bypass_enabled) {
+ top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+ bypass = (dma_mask >= top);
+ }
+
+ if (bypass) {
+ dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+ set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+ } else {
+ dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+ set_dma_ops(&pdev->dev, &dma_iommu_ops);
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ }
+ return 0;
+}
+
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+ struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+ tce32_table);
+ uint16_t window_id = (pe->pe_number << 1) + 1;
+ int64_t rc;
+
+ pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+ if (enable) {
+ phys_addr_t top = memblock_end_of_DRAM();
+
+ top = roundup_pow_of_two(top);
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ top);
+ } else {
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ 0);
+
+ /*
+ * We might want to reset the DMA ops of all devices on
+ * this PE. However in theory, that shouldn't be necessary
+ * as this is used for VFIO/KVM pass-through and the device
+ * hasn't yet been returned to its kernel driver
+ */
+ }
+ if (rc)
+ pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+ else
+ pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ /* TVE #1 is selected by PCI address bit 59 */
+ pe->tce_bypass_base = 1ull << 59;
+
+ /* Install set_bypass callback for VFIO */
+ pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+ /* Enable bypass by default */
+ pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe)
{
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
else
pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ /* Also create a bypass window */
+ pnv_pci_ioda2_setup_bypass_pe(phb, pe);
return;
fail:
if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+ phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
/* Setup shutdown function for kexec */
phb->shutdown = pnv_pci_ioda_shutdown;
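
A worked example of the bypass test in pnv_pci_ioda_dma_set_mask(): bypass is chosen only when the device's DMA mask covers the whole bypass window. The 1ULL << 59 base comes from the patch; the 8 GiB DRAM size and the DMA_BIT_MASK definition (written to match the usual kernel macro) are assumptions for the demo.

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        uint64_t tce_bypass_base = 1ULL << 59;     /* TVE #1: PCI address bit 59 */
        uint64_t dram_end = 8ULL << 30;            /* assume 8 GiB of RAM */
        uint64_t top = tce_bypass_base + dram_end - 1;

        /* same comparison as the patch: dma_mask >= top enables bypass */
        printf("64-bit mask -> bypass: %d\n", DMA_BIT_MASK(64) >= top);  /* 1 */
        printf("32-bit mask -> bypass: %d\n", DMA_BIT_MASK(32) >= top);  /* 0 */
        return 0;
}
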
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b555ebc57ef5..95633d79ef5d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
+int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb && phb->dma_set_mask)
+ return phb->dma_set_mask(phb, pdev, dma_mask);
+ return __dma_set_mask(&pdev->dev, dma_mask);
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 13f1942a9a5f..cde169442775 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -54,7 +54,9 @@ struct pnv_ioda_pe {
struct iommu_table tce32_table;
phys_addr_t tce_inval_reg_phys;
- /* XXX TODO: Add support for additional 64-bit iommus */
+ /* 64-bit TCE bypass region */
+ bool tce_bypass_enabled;
+ uint64_t tce_bypass_base;
/* MSIs. MVE index is identical for 32 and 64 bit MSI
* and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@ struct pnv_phb {
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+ int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
+ u64 dma_mask);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
void (*shutdown)(struct pnv_phb *phb);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index de6819be1f95..0051e108ef0f 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,12 +7,20 @@ extern void pnv_smp_init(void);
static inline void pnv_smp_init(void) { }
#endif
+struct pci_dev;
+
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
+extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
+
+static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ return -ENODEV;
+}
#endif
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 21166f65c97c..110f4fbd319f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/bug.h>
#include <linux/cpuidle.h>
+#include <linux/pci.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
@@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex)
{
}
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev_is_pci(dev))
+ return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
+
static void pnv_shutdown(void)
{
/* Let the PCI code clear up IODA tables */
@@ -238,6 +246,7 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = powernv_idle,
.calibrate_decr = generic_calibrate_decr,
+ .dma_set_mask = pnv_dma_set_mask,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
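
Taken together, the dma.c, powernv/pci.c and pci-ioda.c hunks build a three-level dispatch for dma_set_mask(): the machine hook (ppc_md.dma_set_mask, wired to pnv_dma_set_mask above), then the per-PHB hook, then the generic __dma_set_mask() fallback. A simplified user-space model of that chain is sketched below; the structs and names are stand-ins, not the kernel types.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct demo_phb {
        int (*dma_set_mask)(struct demo_phb *phb, uint64_t mask);
};

struct demo_dev {
        bool is_pci;
        struct demo_phb *phb;
};

/* stands in for the generic __dma_set_mask() fallback */
static int demo_generic_set_mask(struct demo_dev *dev, uint64_t mask)
{
        (void)dev;
        printf("generic __dma_set_mask(%#llx)\n", (unsigned long long)mask);
        return 0;
}

/* stands in for the per-PHB hook, pnv_pci_ioda_dma_set_mask() */
static int demo_ioda_set_mask(struct demo_phb *phb, uint64_t mask)
{
        (void)phb;
        printf("PHB hook: mask %#llx, pick bypass or 32-bit iommu\n",
               (unsigned long long)mask);
        return 0;
}

/* stands in for the machine hook installed via ppc_md.dma_set_mask */
static int demo_pnv_set_mask(struct demo_dev *dev, uint64_t mask)
{
        if (dev->is_pci && dev->phb && dev->phb->dma_set_mask)
                return dev->phb->dma_set_mask(dev->phb, mask);
        return demo_generic_set_mask(dev, mask);
}

int main(void)
{
        struct demo_phb phb = { .dma_set_mask = demo_ioda_set_mask };
        struct demo_dev nic = { .is_pci = true, .phb = &phb };

        demo_pnv_set_mask(&nic, ~0ULL);        /* lands in the per-PHB hook */
        return 0;
}
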
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 37300f6ee244..80b1d57c306a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -20,6 +20,7 @@ config PPC_PSERIES
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
+ select ARCH_RANDOM
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 9ef3cc8ebc11..8a8f0472d98f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
enable = 1;
if (enable) {
- eeh_subsystem_enabled = 1;
+ eeh_set_enable(true);
eeh_add_to_parent_pe(edev);
pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 70670a2d9cf2..c413ec158ff5 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
- const __be32 *pcie_link_speed_stats;
+ u32 pcie_link_speed_stats[2];
+ int rc;
bus = bridge->bus;
@@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
- pcie_link_speed_stats = of_get_property(pdn,
- "ibm,pcie-link-speed-stats", NULL);
- if (pcie_link_speed_stats)
+ rc = of_property_read_u32_array(pdn,
+ "ibm,pcie-link-speed-stats",
+ &pcie_link_speed_stats[0], 2);
+ if (!rc)
break;
}
of_node_put(pdn);
- if (!pcie_link_speed_stats) {
+ if (rc) {
pr_err("no ibm,pcie-link-speed-stats property\n");
return 0;
}
- switch (be32_to_cpup(pcie_link_speed_stats)) {
+ switch (pcie_link_speed_stats[0]) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->max_bus_speed = PCIE_SPEED_5_0GT;
break;
+ case 0x04:
+ bus->max_bus_speed = PCIE_SPEED_8_0GT;
+ break;
default:
bus->max_bus_speed = PCI_SPEED_UNKNOWN;
break;
}
- switch (be32_to_cpup(pcie_link_speed_stats)) {
+ switch (pcie_link_speed_stats[1]) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->cur_bus_speed = PCIE_SPEED_5_0GT;
break;
+ case 0x04:
+ bus->cur_bus_speed = PCIE_SPEED_8_0GT;
+ break;
default:
bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
break;
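
The pseries/pci.c hunk replaces of_get_property() plus be32_to_cpup() with of_property_read_u32_array(), which does the length check and the endian conversion in one call. A minimal sketch of the same pattern on a hypothetical two-cell property ("vendor,example-stats" is a made-up name):

    #include <linux/of.h>

    static int read_example_stats(struct device_node *np, u32 *max, u32 *cur)
    {
            u32 vals[2];
            int rc;

            rc = of_property_read_u32_array(np, "vendor,example-stats",
                                            vals, 2);
            if (rc)
                    return rc;      /* missing, empty or too-short property */

            *max = vals[0];         /* values are already CPU-endian */
            *cur = vals[1];
            return 0;
    }
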
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8e639d7cbda7..972df0ffd4dc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image)
{
long rc;
- if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
- (image->type != KEXEC_TYPE_CRASH)) {
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
rc = pSeries_disable_reloc_on_exc();
if (rc != H_SUCCESS)
pr_warning("Warning: Failed to disable relocation on "
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e166ed4cd16..8209744b2829 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
/* Default: read HW settings */
if (flow_type == IRQ_TYPE_DEFAULT) {
- switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
- MPIC_INFO(VECPRI_SENSE_MASK))) {
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_EDGE_RISING;
- break;
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_EDGE_FALLING;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_LEVEL_HIGH;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_LEVEL_LOW;
- break;
- }
+ int vold_ps;
+
+ vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+ MPIC_INFO(VECPRI_SENSE_MASK));
+
+ if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_EDGE_RISING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_EDGE_FALLING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_LEVEL_HIGH;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+ else
+ WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
}
/* Apply to irq desc */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a90731b3d44a..b07909850f77 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -309,16 +309,23 @@ static void get_output_lock(void)
if (xmon_speaker == me)
return;
+
for (;;) {
- if (xmon_speaker == 0) {
- last_speaker = cmpxchg(&xmon_speaker, 0, me);
- if (last_speaker == 0)
- return;
- }
- timeout = 10000000;
+ last_speaker = cmpxchg(&xmon_speaker, 0, me);
+ if (last_speaker == 0)
+ return;
+
+ /*
+ * Wait a full second for the lock; we might be on a slow
+ * console, but check every 100us.
+ */
+ timeout = 10000;
while (xmon_speaker == last_speaker) {
- if (--timeout > 0)
+ if (--timeout > 0) {
+ udelay(100);
continue;
+ }
+
/* hostile takeover */
prev = cmpxchg(&xmon_speaker, last_speaker, me);
if (prev == last_speaker)
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
xmon_fault_jmp[cpu] = recurse_jmp;
- cpumask_set_cpu(cpu, &cpus_in_xmon);
bp = NULL;
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
release_output_lock();
}
+ cpumask_set_cpu(cpu, &cpus_in_xmon);
+
waiting:
secondary = 1;
while (secondary && !xmon_gate) {
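
The xmon rework replaces an unbounded spin on xmon_speaker with a bounded poll: try the lock with cmpxchg(), and if another CPU holds it, back off in 100us steps for roughly a second (10000 x 100us) before attempting the hostile takeover. A sketch of that acquire pattern in isolation, with hypothetical names:

    #include <linux/delay.h>

    static unsigned long example_speaker;   /* 0 == free, otherwise holder id */

    static void example_get_lock(unsigned long me)
    {
            unsigned long last, prev;
            int timeout;

            for (;;) {
                    last = cmpxchg(&example_speaker, 0, me);
                    if (last == 0)
                            return;                 /* got the lock */

                    /* Poll every 100us, give up after about one second. */
                    timeout = 10000;
                    while (example_speaker == last) {
                            if (--timeout > 0) {
                                    udelay(100);
                                    continue;
                            }
                            /* hostile takeover from the stuck holder */
                            prev = cmpxchg(&example_speaker, last, me);
                            if (prev == last)
                                    return;
                            break;
                    }
            }
    }
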
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3b978c472d08..3d6b9f81cc68 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -132,6 +132,8 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern void efi_setup_page_tables(void);
extern void __init old_map_region(efi_memory_desc_t *md);
+extern void __init runtime_code_page_mkexec(void);
+extern void __init efi_runtime_mkexec(void);
struct efi_setup_data {
u64 fw_vendor;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 24b6fd10625a..8e28bf2fc3ef 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);
- if (cpu_has(c, X86_FEATURE_SMAP))
+ if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
set_in_cr4(X86_CR4_SMAP);
+#else
+ clear_in_cr4(X86_CR4_SMAP);
+#endif
+ }
}
/*
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d4bdd253fea7..e6253195a301 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
return addr >= start && addr < end;
}
-static int
-do_ftrace_mod_code(unsigned long ip, const void *new_code)
+static unsigned long text_ip_addr(unsigned long ip)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
if (within(ip, (unsigned long)_text, (unsigned long)_etext))
ip = (unsigned long)__va(__pa_symbol(ip));
- return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
+ return ip;
}
static const unsigned char *ftrace_nop_replace(void)
@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
+ ip = text_ip_addr(ip);
+
/* replace the text with the new text */
- if (do_ftrace_mod_code(ip, new_code))
+ if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
return -EPERM;
sync_core();
@@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
-int ftrace_update_ftrace_func(ftrace_func_t func)
+static unsigned long ftrace_update_func;
+
+static int update_ftrace_func(unsigned long ip, void *new)
{
- unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned char old[MCOUNT_INSN_SIZE], *new;
+ unsigned char old[MCOUNT_INSN_SIZE];
int ret;
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
- new = ftrace_call_replace(ip, (unsigned long)func);
+ memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+
+ ftrace_update_func = ip;
+ /* Make sure the breakpoints see the ftrace_update_func update */
+ smp_wmb();
/* See comment above by declaration of modifying_ftrace_code */
atomic_inc(&modifying_ftrace_code);
ret = ftrace_modify_code(ip, old, new);
+ atomic_dec(&modifying_ftrace_code);
+
+ return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned char *new;
+ int ret;
+
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
- ret = ftrace_modify_code(ip, old, new);
+ ret = update_ftrace_func(ip, new);
}
- atomic_dec(&modifying_ftrace_code);
-
return ret;
}
static int is_ftrace_caller(unsigned long ip)
{
- if (ip == (unsigned long)(&ftrace_call) ||
- ip == (unsigned long)(&ftrace_regs_call))
+ if (ip == ftrace_update_func)
return 1;
return 0;
@@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data)
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
-static int ftrace_mod_jmp(unsigned long ip,
- int old_offset, int new_offset)
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
- unsigned char code[MCOUNT_INSN_SIZE];
+ static union ftrace_code_union calc;
- if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
+ /* Jmp not a call (ignore the .e8) */
+ calc.e8 = 0xe9;
+ calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
- if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
- return -EINVAL;
+ /*
+ * ftrace external locks synchronize the access to the static variable.
+ */
+ return calc.code;
+}
- *(int *)(&code[1]) = new_offset;
+static int ftrace_mod_jmp(unsigned long ip, void *func)
+{
+ unsigned char *new;
- if (do_ftrace_mod_code(ip, &code))
- return -EPERM;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
- return 0;
+ return update_ftrace_func(ip, new);
}
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
- old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
-
- old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* !CONFIG_DYNAMIC_FTRACE */
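
The ftrace rework funnels every call/jmp-site patch through update_ftrace_func(): record the target ip, publish it with smp_wmb() so the trap handler can recognise the site, and bracket the actual text write with the modifying_ftrace_code counter. A stripped-down sketch of that ordering, using illustrative names rather than the kernel's actual symbols:

    #include <linux/kernel.h>
    #include <linux/atomic.h>

    static unsigned long example_update_ip;
    static atomic_t example_modifying = ATOMIC_INIT(0);

    static int example_update(unsigned long ip, void *new_code,
                              int (*modify)(unsigned long ip, void *new_code))
    {
            int ret;

            example_update_ip = ip;
            smp_wmb();      /* publish ip before any trap handler can need it */

            atomic_inc(&example_modifying);
            ret = modify(ip, new_code);
            atomic_dec(&example_modifying);

            return ret;
    }

Both ftrace_update_ftrace_func() and the reworked ftrace_mod_jmp() now route through the single helper, which is why is_ftrace_caller() only needs to compare against the one recorded ip.
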
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 19e5adb49a27..acb3b606613e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
* dance when it's actually needed.
*/
- preempt_disable();
+ preempt_disable_notrace();
data = this_cpu_read(cyc2ns.head);
tail = this_cpu_read(cyc2ns.tail);
@@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
if (!--data->__count)
this_cpu_write(cyc2ns.tail, data);
}
- preempt_enable();
+ preempt_enable_notrace();
return ns;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d591c895803..6dea040cc3a1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address)
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
+ if (!IS_ENABLED(CONFIG_X86_SMAP))
+ return false;
+
+ if (!static_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
if (error_code & PF_USER)
return false;
@@ -1087,11 +1093,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- if (static_cpu_has(X86_FEATURE_SMAP)) {
- if (unlikely(smap_violation(error_code, regs))) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
+ if (unlikely(smap_violation(error_code, regs))) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
}
/*
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index 4df9591eadad..f15103dff4b4 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -42,7 +42,7 @@ void __init efi_bgrt_init(void)
if (bgrt_tab->header.length < sizeof(*bgrt_tab))
return;
- if (bgrt_tab->version != 1)
+ if (bgrt_tab->version != 1 || bgrt_tab->status != 1)
return;
if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
return;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index d62ec87a2b26..1a201ac7cef8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -792,7 +792,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
set_memory_nx(addr, npages);
}
-static void __init runtime_code_page_mkexec(void)
+void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
void *p;
@@ -1069,8 +1069,7 @@ void __init efi_enter_virtual_mode(void)
efi.update_capsule = virt_efi_update_capsule;
efi.query_capsule_caps = virt_efi_query_capsule_caps;
- if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
- runtime_code_page_mkexec();
+ efi_runtime_mkexec();
kfree(new_memmap);
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 249b183cf417..0b74cdf7f816 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -77,3 +77,9 @@ void efi_call_phys_epilog(void)
local_irq_restore(efi_rt_eflags);
}
+
+void __init efi_runtime_mkexec(void)
+{
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
+}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6284f158a47d..0c2a234fef1e 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
efi_setup = phys_addr + sizeof(struct setup_data);
}
+
+void __init efi_runtime_mkexec(void)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
+}
diff --git a/block/blk-core.c b/block/blk-core.c
index c00e0bdeab4a..853f92749202 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
+ uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!uninit_q->flush_rq)
+ goto out_cleanup_queue;
+
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- blk_cleanup_queue(uninit_q);
-
+ goto out_free_flush_rq;
return q;
+
+out_free_flush_rq:
+ kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+ blk_cleanup_queue(uninit_q);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
- return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ return blk_mq_alloc_request(q, rw, gfp_mask);
else
return blk_old_get_request(q, rw, gfp_mask);
}
@@ -1278,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(!q))
return;
+ if (q->mq_ops) {
+ blk_mq_free_request(req);
+ return;
+ }
+
blk_pm_put_request(req);
elv_completed_request(q, req);
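
blk_init_queue_node() now allocates q->flush_rq up front and unwinds through labelled gotos if blk_init_allocated_queue() then fails. A self-contained sketch of the same allocate-then-unwind shape, with hypothetical resources:

    #include <linux/slab.h>

    struct example_ctx {
            void *a;
            void *b;
    };

    static struct example_ctx *example_alloc(void)
    {
            struct example_ctx *ctx;

            ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
            if (!ctx)
                    return NULL;

            ctx->a = kzalloc(64, GFP_KERNEL);
            if (!ctx->a)
                    goto out_free_ctx;

            ctx->b = kzalloc(64, GFP_KERNEL);
            if (!ctx->b)
                    goto out_free_a;        /* unwind in reverse order */

            return ctx;

    out_free_a:
            kfree(ctx->a);
    out_free_ctx:
            kfree(ctx);
            return NULL;
    }
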
diff --git a/block/blk-exec.c b/block/blk-exec.c
index bbfc072a79c2..c68613bb4c79 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, true);
+ blk_mq_insert_request(q, rq, at_head, true);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9288aaf35c21..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq);
}
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
- rq = container_of(work, struct request, mq_flush_data);
+ rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_run_request(rq, true, false);
}
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
{
- INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
- kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+ if (rq->q->mq_ops) {
+ INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+ kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+ return false;
+ } else {
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ return true;
+ }
}
/**
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- if (q->mq_ops)
- blk_mq_flush_data_insert(rq);
- else {
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
- }
+ queued = blk_flush_queue_rq(rq);
break;
case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
}
kicked = blk_kick_flush(q);
- /* blk_mq_run_flush will run queue */
- if (q->mq_ops)
- return queued;
return kicked | queued;
}
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
struct request *rq, *n;
unsigned long flags = 0;
- if (q->mq_ops) {
- blk_mq_free_request(flush_rq);
+ if (q->mq_ops)
spin_lock_irqsave(&q->mq_flush_lock, flags);
- }
+
running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
* kblockd.
*/
if (queued || q->flush_queue_delayed) {
- if (!q->mq_ops)
- blk_run_queue_async(q);
- else
- /*
- * This can be optimized to only run queues with requests
- * queued if necessary.
- */
- blk_mq_run_queues(q, true);
+ WARN_ON(q->mq_ops);
+ blk_run_queue_async(q);
}
q->flush_queue_delayed = 0;
if (q->mq_ops)
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
-static void mq_flush_work(struct work_struct *work)
-{
- struct request_queue *q;
- struct request *rq;
-
- q = container_of(work, struct request_queue, mq_flush_work);
-
- /* We don't need set REQ_FLUSH_SEQ, it's for consistency */
- rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
- __GFP_WAIT|GFP_ATOMIC, true);
- rq->cmd_type = REQ_TYPE_FS;
- rq->end_io = flush_end_io;
-
- blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
- kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
* different from running_idx, which means flush is in flight.
*/
q->flush_pending_idx ^= 1;
+
if (q->mq_ops) {
- mq_run_flush(q);
- return true;
+ struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ blk_mq_rq_init(hctx, q->flush_rq);
+ q->flush_rq->mq_ctx = ctx;
+
+ /*
+ * Reuse the tag value from the first waiting request;
+ * with blk-mq the tag is generated during request
+ * allocation and drivers can rely on it being inside
+ * the range they asked for.
+ */
+ q->flush_rq->tag = first_rq->tag;
+ } else {
+ blk_rq_init(q, q->flush_rq);
}
- blk_rq_init(q, &q->flush_rq);
- q->flush_rq.cmd_type = REQ_TYPE_FS;
- q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
- q->flush_rq.rq_disk = first_rq->rq_disk;
- q->flush_rq.end_io = flush_end_io;
+ q->flush_rq->cmd_type = REQ_TYPE_FS;
+ q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq->rq_disk = first_rq->rq_disk;
+ q->flush_rq->end_io = flush_end_io;
- list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
- return true;
+ return blk_flush_queue_rq(q->flush_rq);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
void blk_mq_init_flush(struct request_queue *q)
{
spin_lock_init(&q->mq_flush_lock);
- INIT_WORK(&q->mq_flush_work, mq_flush_work);
}
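
On the blk-mq side the flush machinery cannot dispatch from the context it completes in, so mq_flush_run() is queued as a work item embedded in the request and executed by kblockd. A minimal sketch of that defer-to-workqueue shape, using a hypothetical payload structure and the system workqueue instead of kblockd:

    #include <linux/workqueue.h>

    struct example_item {
            struct work_struct work;
            int payload;
    };

    static void example_run(struct work_struct *work)
    {
            struct example_item *item =
                    container_of(work, struct example_item, work);

            /* ...dispatch item->payload from process context... */
            (void)item;
    }

    static void example_defer(struct example_item *item)
    {
            INIT_WORK(&item->work, example_run);
            schedule_work(&item->work);     /* blk-flush uses kblockd instead */
    }
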
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2da76c999ef3..97a733cf3d5f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
atomic_inc(&bb.done);
submit_bio(type, bio);
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
}
blk_finish_plug(&plug);
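
The added cond_resched() keeps a whole-device discard (mkfs, for example) from hogging the CPU when preemption is disabled. The shape is simply a voluntary reschedule point inside a long submission loop; a minimal sketch:

    #include <linux/sched.h>

    /* Hypothetical long-running submit loop: offer to reschedule on
     * every iteration so we do not trip the soft-lockup detector. */
    static void example_submit_many(unsigned int nr)
    {
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    /* submit_one(i); */
                    cond_resched();
            }
    }
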
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8f8adaa95466..6c583f9c5b65 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (!bio)
return 0;
+ /*
+ * This should probably be returning 0, but blk_add_request_payload()
+ * can still attach a payload to a discard bio (Christoph!!!!), so
+ * count it as a single segment for now.
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
fbio = bio;
cluster = blk_queue_cluster(q);
seg_size = 0;
@@ -161,30 +171,60 @@ new_segment:
*bvprv = *bvec;
}
-/*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist,
+ struct scatterlist **sg)
{
struct bio_vec bvec, bvprv = { NULL };
- struct req_iterator iter;
- struct scatterlist *sg;
+ struct bvec_iter iter;
int nsegs, cluster;
nsegs = 0;
cluster = blk_queue_cluster(q);
- /*
- * for each bio in rq
- */
- sg = NULL;
- rq_for_each_segment(bvec, rq, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in rq */
+ if (bio->bi_rw & REQ_DISCARD) {
+ /*
+ * This is a hack - drivers should be neither modifying the
+ * biovec, nor relying on bi_vcnt - but because of
+ * blk_add_request_payload(), a discard bio may or may not have
+ * a payload we need to set up here (thank you Christoph) and
+ * bi_vcnt is really the only way of telling if we need to.
+ */
+
+ if (bio->bi_vcnt)
+ goto single_segment;
+
+ return 0;
+ }
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+ *sg = sglist;
+ bvec = bio_iovec(bio);
+ sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+ return 1;
+ }
+
+ for_each_bio(bio)
+ bio_for_each_segment(bvec, bio, iter)
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+ &nsegs, &cluster);
+ return nsegs;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *sg = NULL;
+ int nsegs = 0;
+
+ if (rq->bio)
+ nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec bvec, bvprv = { NULL };
- struct scatterlist *sg;
- int nsegs, cluster;
- struct bvec_iter iter;
-
- nsegs = 0;
- cluster = blk_queue_cluster(q);
-
- sg = NULL;
- bio_for_each_segment(bvec, bio, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in bio */
+ struct scatterlist *sg = NULL;
+ int nsegs;
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+ bio->bi_next = next;
if (sg)
sg_mark_end(sg);
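
With the per-bio walk folded into __blk_bios_map_sg(), the driver-facing contract of blk_rq_map_sg() is unchanged: pass a scatterlist sized for rq->nr_phys_segments and get back the number of entries actually used. A hedged usage sketch (the function and table names are hypothetical):

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* "sg_table" is assumed to have been sized for the queue's maximum
     * segment count when the driver was set up. */
    static int example_map_request(struct request_queue *q, struct request *rq,
                                   struct scatterlist *sg_table)
    {
            int nents;

            sg_init_table(sg_table, rq->nr_phys_segments);
            nents = blk_rq_map_sg(q, rq, sg_table);

            /* nents may be smaller than nr_phys_segments after merging */
            return nents;
    }
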
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5d70edc9855f..83ae96c51a27 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
- int cpu;
+ unsigned int cpu;
if (!tags)
return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 57039fcd9c93..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -226,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
return rq;
}
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
- gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
{
struct request *rq;
if (blk_mq_queue_enter(q))
return NULL;
- rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
if (rq)
blk_mq_put_ctx(rq->mq_ctx);
return rq;
@@ -258,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
/*
* Re-init and set pdu, if we have it
*/
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
blk_rq_init(hctx->queue, rq);
@@ -305,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
bio_endio(bio, error);
}
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
{
struct bio *bio = rq->bio;
unsigned int bytes = 0;
@@ -330,48 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error)
else
blk_mq_free_request(rq);
}
+EXPORT_SYMBOL(blk_mq_end_io);
-void __blk_mq_end_io(struct request *rq, int error)
-{
- if (!blk_mark_rq_complete(rq))
- blk_mq_complete_request(rq, error);
-}
-
-static void blk_mq_end_io_remote(void *data)
+static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
- __blk_mq_end_io(rq, rq->errors);
+ rq->q->softirq_done_fn(rq);
}
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
int cpu;
- if (!ctx->ipi_redirect)
- return __blk_mq_end_io(rq, error);
+ if (!ctx->ipi_redirect) {
+ rq->q->softirq_done_fn(rq);
+ return;
+ }
cpu = get_cpu();
if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
- rq->errors = error;
- rq->csd.func = blk_mq_end_io_remote;
+ rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
__smp_call_function_single(ctx->cpu, &rq->csd, 0);
} else {
- __blk_mq_end_io(rq, error);
+ rq->q->softirq_done_fn(rq);
}
put_cpu();
}
-EXPORT_SYMBOL(blk_mq_end_io);
-static void blk_mq_start_request(struct request *rq)
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions.
+ * The actual completion happens out-of-order, through a IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+ if (unlikely(blk_should_fake_timeout(rq->q)))
+ return;
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
{
struct request_queue *q = rq->q;
@@ -384,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
*/
rq->deadline = jiffies + q->rq_timeout;
set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * Make sure space for the drain appears. We know we can do
+ * this because max_hw_segments has been adjusted to be one
+ * fewer than the device can handle.
+ */
+ rq->nr_phys_segments++;
+ }
+
+ /*
+ * Flag the last request in the series so that drivers know when IO
+ * should be kicked off, if they don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition on which drivers should kick
+ * off IO. If the drive is busy, the last request might not have the bit set.
+ */
+ if (last)
+ rq->cmd_flags |= REQ_END;
}
static void blk_mq_requeue_request(struct request *rq)
@@ -392,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
trace_block_rq_requeue(q, rq);
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ rq->cmd_flags &= ~REQ_END;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq))
+ rq->nr_phys_segments--;
}
struct blk_mq_timeout_data {
@@ -559,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_start_request(rq);
- /*
- * Last request in the series. Flag it as such, this
- * enables drivers to know when IO should be kicked off,
- * if they don't do it on a per-request basis.
- *
- * Note: the flag isn't the only condition drivers
- * should do kick off. If drive is busy, the last
- * request might not have the bit set.
- */
- if (list_empty(&rq_list))
- rq->cmd_flags |= REQ_END;
+ blk_mq_start_request(rq, list_empty(&rq_list));
ret = q->mq_ops->queue_rq(hctx, rq);
switch (ret) {
@@ -589,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
break;
default:
pr_err("blk-mq: bad return on queue: %d\n", ret);
- rq->errors = -EIO;
case BLK_MQ_RQ_QUEUE_ERROR:
+ rq->errors = -EIO;
blk_mq_end_io(rq, rq->errors);
break;
}
@@ -693,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work)
}
static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+ struct request *rq, bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
trace_block_rq_insert(hctx->queue, rq);
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ if (at_head)
+ list_add(&rq->queuelist, &ctx->rq_list);
+ else
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
/*
@@ -709,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
}
void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool run_queue)
+ bool at_head, bool run_queue)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx, *current_ctx;
@@ -728,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq,
rq->mq_ctx = ctx;
}
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -760,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
/* ctx->cpu might be offline */
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -798,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -888,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
return;
@@ -950,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
__blk_mq_free_request(hctx, ctx, rq);
else {
blk_mq_bio_to_request(rq, bio);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -1309,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
reg->queue_depth = BLK_MQ_MAX_DEPTH;
}
- /*
- * Set aside a tag for flush requests. It will only be used while
- * another flush request is in progress but outside the driver.
- *
- * TODO: only allocate if flushes are supported
- */
- reg->queue_depth++;
- reg->reserved_tags++;
-
if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
return ERR_PTR(-EINVAL);
@@ -1360,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
q->mq_ops = reg->ops;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+ q->sg_reserved_size = INT_MAX;
+
blk_queue_make_request(q, blk_mq_make_request);
blk_queue_rq_timed_out(q, reg->ops->timeout);
if (reg->timeout)
blk_queue_rq_timeout(q, reg->timeout);
+ if (reg->ops->complete)
+ blk_queue_softirq_done(q, reg->ops->complete);
+
blk_mq_init_flush(q);
blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
- if (blk_mq_init_hw_queues(q, reg, driver_data))
+ q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+ cache_line_size()), GFP_KERNEL);
+ if (!q->flush_rq)
goto err_hw;
+ if (blk_mq_init_hw_queues(q, reg, driver_data))
+ goto err_flush_rq;
+
blk_mq_map_swqueue(q);
mutex_lock(&all_q_mutex);
@@ -1378,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
mutex_unlock(&all_q_mutex);
return q;
+
+err_flush_rq:
+ kfree(q->flush_rq);
err_hw:
kfree(q->mq_map);
err_map:
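
The completion rework splits "the hardware finished" from "finish it on the right CPU": a driver calls blk_mq_complete_request() from its IRQ path and provides a .complete handler in blk_mq_ops, which the core runs either locally or via IPI on the submitting CPU. A minimal sketch of the driver side with hypothetical names (the null_blk and virtio_blk hunks further down follow this shape):

    #include <linux/blk-mq.h>
    #include <linux/interrupt.h>

    static void example_complete(struct request *rq)
    {
            /* Runs on (or near) the CPU that submitted the request. */
            blk_mq_end_io(rq, 0);
    }

    static irqreturn_t example_irq(int irq, void *data)
    {
            struct request *rq = data;      /* however the driver finds it */

            /* Cheap in hard-irq context: just hand off to the core. */
            blk_mq_complete_request(rq);
            return IRQ_HANDLED;
    }

    static struct blk_mq_ops example_mq_ops = {
            /* .queue_rq, .map_queue, ... */
            .complete       = example_complete,
    };
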
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5c3917984b00..ed0035cd458e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,13 +22,13 @@ struct blk_mq_ctx {
struct kobject kobj;
};
-void __blk_mq_end_io(struct request *rq, int error);
-void blk_mq_complete_request(struct request *rq, int error);
+void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
/*
* CPU hotplug helpers
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8095c4a21fc0..7500f876dae4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
if (q->mq_ops)
blk_mq_free_queue(q);
+ kfree(q->flush_rq);
+
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9348e1..d96f7061c6fd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req)
case BLK_EH_HANDLED:
/* Can we use req->errors here? */
if (q->mq_ops)
- blk_mq_complete_request(req, req->errors);
+ __blk_mq_complete_request(req);
else
__blk_complete_request(req);
break;
diff --git a/block/blk.h b/block/blk.h
index c90e1d8f7a2b..d23b415b8a28 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dying(q)) ||
+ if (unlikely(blk_queue_bypass(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0b6ae6eb5c4a..368f9ddb8480 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev,
ACPI_COMPANION_SET(dev, adev);
dev->release = acpi_container_release;
ret = device_register(dev);
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
-
+ }
adev->driver_data = dev;
return 1;
}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c431c88faaff..e9b3081c4fe9 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
static void dock_notify(struct dock_station *ds, u32 event)
{
acpi_handle handle = ds->handle;
- struct acpi_device *ad;
+ struct acpi_device *adev = NULL;
int surprise_removal = 0;
/*
@@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+ acpi_bus_get_device(handle, &adev);
+ if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 20a7517bd339..52b8181ddafd 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4126,12 +4126,14 @@ static int mv_platform_probe(struct platform_device *pdev)
clk_prepare_enable(hpriv->port_clks[port]);
sprintf(port_number, "port%d", port);
- hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number);
+ hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+ port_number);
if (IS_ERR(hpriv->port_phys[port])) {
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
- if ((rc != -EPROBE_DEFER) && (rc != -ENODEV))
- dev_warn(&pdev->dev, "error getting phy");
+ if (rc != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "error getting phy %d\n",
+ rc);
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 62a76076b548..f1a9198dfe5a 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1925,7 +1925,7 @@ static int ucode_init(loader_block *lb, amb_dev *dev)
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
- const char *errmsg = 0;
+ const char *errmsg = NULL;
int res;
res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 9587e959ce1a..13ed54cf2c31 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -639,9 +639,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
card->hbnr.init = NUM_HB;
card->hbnr.max = MAX_HB;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
card->sm_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
card->lg_addr = 0x00000000;
card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
@@ -979,7 +979,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->sm_addr;
handle2 = card->sm_handle;
card->sm_addr = 0x00000000;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
} else { /* (!sm_addr) */
card->sm_addr = addr1;
@@ -993,7 +993,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->lg_addr;
handle2 = card->lg_handle;
card->lg_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
} else { /* (!lg_addr) */
card->lg_addr = addr1;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e3fb496c7163..943cf0d6abaf 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -760,7 +760,7 @@ static irqreturn_t solos_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-void solos_bh(unsigned long card_arg)
+static void solos_bh(unsigned long card_arg)
{
struct solos_card *card = (void *)card_arg;
uint32_t card_flags;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
goto out;
}
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Found all components */
ret = master->ops->bind(master->dev);
if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
master_remove_components(master);
goto out;
}
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
{
if (master->bound) {
master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
master->bound = false;
}
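
The component fix brackets the master's bind() with a devres group, so everything the bind path allocated with devm_* can be released as a unit if bind fails or when the master is later unbound. A minimal sketch of the open/release-group pattern (the wrapper and callback here are hypothetical):

    #include <linux/device.h>
    #include <linux/gfp.h>

    static int example_bind_master(struct device *dev,
                                   int (*bind)(struct device *))
    {
            int ret;

            if (!devres_open_group(dev, NULL, GFP_KERNEL))
                    return -ENOMEM;

            ret = bind(dev);
            if (ret < 0) {
                    /* Throw away every devm_* allocation made by bind(). */
                    devres_release_group(dev, NULL);
                    return ret;
            }
            return 0;
    }
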
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd61da2..61d6d62cc0d3 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -616,36 +616,35 @@ static int dma_buf_describe(struct seq_file *s)
if (ret)
return ret;
- seq_printf(s, "\nDma-buf Objects:\n");
- seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+ seq_puts(s, "\nDma-buf Objects:\n");
+ seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
- seq_printf(s,
- "\tERROR locking buffer object: skipping\n");
+ seq_puts(s,
+ "\tERROR locking buffer object: skipping\n");
continue;
}
- seq_printf(s, "\t");
-
- seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
- buf_obj->exp_name, buf_obj->size,
+ seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+ buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
- (long)(buf_obj->file->f_count.counter));
+ (long)(buf_obj->file->f_count.counter),
+ buf_obj->exp_name);
- seq_printf(s, "\t\tAttached Devices:\n");
+ seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
- seq_printf(s, "\t\t");
+ seq_puts(s, "\t");
- seq_printf(s, "%s\n", attach_obj->dev->init_name);
+ seq_printf(s, "%s\n", dev_name(attach_obj->dev));
attach_count++;
}
- seq_printf(s, "\n\t\tTotal %d devices attached\n",
+ seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3107282a9741..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
NULL_IRQ_TIMER = 2,
+};
+enum {
NULL_Q_BIO = 0,
NULL_Q_RQ = 1,
NULL_Q_MQ = 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- if (cmd->rq) {
- if (queue_mode == NULL_Q_MQ)
- blk_mq_end_io(cmd->rq, 0);
- else {
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, 0);
- }
- } else if (cmd->bio)
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_end_io(cmd->rq, 0);
+ return;
+ case NULL_Q_RQ:
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ break;
+ case NULL_Q_BIO:
bio_endio(cmd->bio, 0);
+ break;
+ }
- if (queue_mode != NULL_Q_MQ)
- free_cmd(cmd);
+ free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
cq = &per_cpu(completion_queues, smp_processor_id());
while ((entry = llist_del_all(&cq->list)) != NULL) {
+ entry = llist_reverse_order(entry);
do {
cmd = container_of(entry, struct nullb_cmd, ll_list);
end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
- struct completion_queue *cq;
- struct llist_node *entry, *next;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- entry = llist_del_all(&cq->list);
-
- while (entry) {
- next = entry->next;
- cmd = llist_entry(entry, struct nullb_cmd, ll_list);
- end_cmd(cmd);
- entry = next;
- }
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
- struct call_single_data *data = &cmd->csd;
- int cpu = get_cpu();
- struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
- cmd->ll_list.next = NULL;
-
- if (llist_add(&cmd->ll_list, &cq->list)) {
- data->func = null_ipi_cmd_end_io;
- data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
- }
-
- put_cpu();
+ end_cmd(rq->special);
}
-#endif /* CONFIG_SMP */
-
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
/* Complete IO by inline, softirq or timer */
switch (irqmode) {
- case NULL_IRQ_NONE:
- end_cmd(cmd);
- break;
case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
- null_cmd_end_ipi(cmd);
-#else
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_complete_request(cmd->rq);
+ break;
+ case NULL_Q_RQ:
+ blk_complete_request(cmd->rq);
+ break;
+ case NULL_Q_BIO:
+ /*
+ * XXX: no proper submitting cpu information available.
+ */
+ end_cmd(cmd);
+ break;
+ }
+ break;
+ case NULL_IRQ_NONE:
end_cmd(cmd);
-#endif
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
+ .complete = null_softirq_done_fn,
};
static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,6 @@ static int __init null_init(void)
{
unsigned int i;
-#if !defined(CONFIG_SMP)
- if (irqmode == NULL_IRQ_SOFTIRQ) {
- pr_warn("null_blk: softirq completions not available.\n");
- pr_warn("null_blk: using direct completions.\n");
- irqmode = NULL_IRQ_NONE;
- }
-#endif
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
{
- struct request *req = vbr->req;
+ struct virtblk_req *vbr = req->special;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- virtblk_request_done(vbr);
+ blk_mq_complete_request(vbr->req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
.map_queue = blk_mq_map_queue,
.alloc_hctx = blk_mq_alloc_single_hw_queue,
.free_hctx = blk_mq_free_single_hw_queue,
+ .complete = virtblk_request_done,
};
static struct blk_mq_reg virtio_mq_reg = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4b97b86da926..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
BUG_ON(num != 0);
}
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
- INIT_LIST_HEAD(&blkif->persistent_purge_list);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -420,7 +420,6 @@ finished:
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
- INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
schedule_work(&blkif->persistent_purge_work);
pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
return;
@@ -625,9 +624,23 @@ purge_gnt_list:
print_stats(blkif);
}
- /* Since we are shutting down remove all pages from the buffer */
- shrink_free_pagepool(blkif, 0 /* All */);
+ /* Drain pending purge work */
+ flush_work(&blkif->persistent_purge_work);
+ if (log_stats)
+ print_stats(blkif);
+
+ blkif->xenblkd = NULL;
+ xen_blkif_put(blkif);
+
+ return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
/* Free all persistent grant pages */
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +649,8 @@ purge_gnt_list:
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
blkif->persistent_gnt_c = 0;
- if (log_stats)
- print_stats(blkif);
-
- blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
-
- return 0;
+ /* Since we are shutting down remove all pages from the buffer */
+ shrink_free_pagepool(blkif, 0 /* All */);
}
/*
@@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct grant_page **pages = pending_req->indirect_pages;
struct xen_blkif *blkif = pending_req->blkif;
int indirect_grefs, rc, n, nseg, i;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_pages;
indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
do {
- /* The initial value is one, and one refcnt taken at the
- * start of the xen_blkif_schedule thread. */
- if (atomic_read(&blkif->refcnt) <= 2)
+ if (atomic_read(&blkif->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
@@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt)) {
- xen_blkbk_unmap(pending_req->blkif,
+ struct xen_blkif *blkif = pending_req->blkif;
+
+ xen_blkbk_unmap(blkif,
pending_req->segments,
pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
+ make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
- xen_blkif_put(pending_req->blkif);
- if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
- if (atomic_read(&pending_req->blkif->drain))
- complete(&pending_req->blkif->drain_complete);
+ free_req(blkif, pending_req);
+ /*
+ * Make sure the request is freed before releasing blkif,
+ * or there could be a race between free_req and the
+ * cleanup done in xen_blkif_free during shutdown.
+ *
+ * NB: The fact that we might try to wake up pending_free_wq
+ * before drain_complete (in case there's a drain going on) is
+ * not a problem with our current implementation, because we
+ * can guarantee there's no thread waiting on pending_free_wq
+ * if a drain is going on; but it has to be taken into account
+ * if the current model is changed.
+ */
+ if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ complete(&blkif->drain_complete);
}
- free_req(pending_req->blkif, pending_req);
+ xen_blkif_put(blkif);
}
}
@@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
xen_blkif_get(blkif);
+ atomic_inc(&blkif->inflight);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
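
blkback now tracks in-flight requests with a dedicated atomic counter instead of inferring them from the refcount: take it when a request is dispatched, drop it when the last bio completes, and wake the drain waiter when it reaches zero. A self-contained sketch of that counter-plus-completion pattern with hypothetical names (drain_done is assumed to have been init_completion()'d at setup):

    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/jiffies.h>

    struct example_dev {
            atomic_t inflight;
            atomic_t draining;
            struct completion drain_done;
    };

    static void example_submit(struct example_dev *d)
    {
            atomic_inc(&d->inflight);
            /* ...issue the I/O... */
    }

    static void example_complete(struct example_dev *d)
    {
            /* ...finish the I/O... */
            if (atomic_dec_and_test(&d->inflight) && atomic_read(&d->draining))
                    complete(&d->drain_done);
    }

    static void example_drain(struct example_dev *d)
    {
            atomic_set(&d->draining, 1);
            while (atomic_read(&d->inflight) != 0)
                    wait_for_completion_timeout(&d->drain_done, HZ);
            atomic_set(&d->draining, 0);
    }
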
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
/* for barrier (drain) requests */
struct completion drain_complete;
atomic_t drain;
+ atomic_t inflight;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->persistent_gnts.rb_node = NULL;
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
+ INIT_LIST_HEAD(&blkif->persistent_purge_list);
blkif->free_pages_num = 0;
atomic_set(&blkif->persistent_gnt_in_use, 0);
+ atomic_set(&blkif->inflight, 0);
+ INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->pending_free);
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
+ /* Remove all persistent grants and the cache of ballooned pages. */
+ xen_blkbk_free_caches(blkif);
+
+ /* Make sure everything is drained before shutting down */
+ BUG_ON(blkif->persistent_gnt_c != 0);
+ BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+ BUG_ON(blkif->free_pages_num != 0);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
+ BUG_ON(!list_empty(&blkif->free_pages));
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref, n;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
/*
* Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
} else {
n = i % SEGS_PER_INDIRECT_FRAME;
segments[n] =
- (struct blkif_request_segment_aligned) {
+ (struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
blkfront_connect(info);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
blkfront_closing(info);
break;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
depends on RAW_DRIVER
+ range 1 65536
default "256"
help
The maximum number of RAW devices that are supported.
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
struct raw_device_data *rawdev;
struct block_device *bdev;
- if (number <= 0 || number >= MAX_RAW_MINORS)
+ if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
rawdev = &raw_devices[number];
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 974b2db2fe10..0595dc6c453e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -99,31 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
return;
}
-static void __init kona_timers_init(struct device_node *node)
-{
- u32 freq;
- struct clk *external_clk;
-
- external_clk = of_clk_get_by_name(node, NULL);
-
- if (!IS_ERR(external_clk)) {
- arch_timer_rate = clk_get_rate(external_clk);
- clk_prepare_enable(external_clk);
- } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
- arch_timer_rate = freq;
- } else {
- panic("unable to determine clock-frequency");
- }
-
- /* Setup IRQ numbers */
- timers.tmr_irq = irq_of_parse_and_map(node, 0);
-
- /* Setup IO addresses */
- timers.tmr_regs = of_iomap(node, 0);
-
- kona_timer_disable_and_clear(timers.tmr_regs);
-}
-
static int kona_timer_set_next_event(unsigned long clc,
struct clock_event_device *unused)
{
@@ -198,7 +173,34 @@ static struct irqaction kona_timer_irq = {
static void __init kona_timer_init(struct device_node *node)
{
- kona_timers_init(node);
+ u32 freq;
+ struct clk *external_clk;
+
+ if (!of_device_is_available(node)) {
+ pr_info("Kona Timer v1 marked as disabled in device tree\n");
+ return;
+ }
+
+ external_clk = of_clk_get_by_name(node, NULL);
+
+ if (!IS_ERR(external_clk)) {
+ arch_timer_rate = clk_get_rate(external_clk);
+ clk_prepare_enable(external_clk);
+ } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
+ arch_timer_rate = freq;
+ } else {
+ pr_err("Kona Timer v1 unable to determine clock-frequency");
+ return;
+ }
+
+ /* Setup IRQ numbers */
+ timers.tmr_irq = irq_of_parse_and_map(node, 0);
+
+ /* Setup IO addresses */
+ timers.tmr_regs = of_iomap(node, 0);
+
+ kona_timer_disable_and_clear(timers.tmr_regs);
+
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 79606f473f48..c788abf1c457 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -51,8 +51,6 @@ static inline int32_t div_fp(int32_t x, int32_t y)
return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
-static u64 energy_divisor;
-
struct sample {
int32_t core_pct_busy;
u64 aperf;
@@ -630,12 +628,10 @@ static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
struct sample *sample;
- u64 energy;
intel_pstate_sample(cpu);
sample = &cpu->samples[cpu->sample_ptr];
- rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
intel_pstate_adjust_busy_pstate(cpu);
@@ -644,7 +640,6 @@ static void intel_pstate_timer_func(unsigned long __data)
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
- div64_u64(energy, energy_divisor),
sample->freq);
intel_pstate_set_sample_time(cpu);
@@ -926,7 +921,6 @@ static int __init intel_pstate_init(void)
int cpu, rc = 0;
const struct x86_cpu_id *id;
struct cpu_defaults *cpu_info;
- u64 units;
if (no_load)
return -ENODEV;
@@ -960,9 +954,6 @@ static int __init intel_pstate_init(void)
if (rc)
goto out;
- rdmsrl(MSR_RAPL_POWER_UNIT, units);
- energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
-
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000671c5..1e5481d88a26 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
return sl->entry_nr * sizeof(struct nx842_slentry);
}
+static inline unsigned long nx842_get_pa(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ return page_to_phys(vmalloc_to_page(addr))
+ + offset_in_page(addr);
+ else
+ return __pa(addr);
+}
+
static int nx842_build_scatterlist(unsigned long buf, int len,
struct nx842_scatterlist *sl)
{
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
entry = sl->entries;
while (len) {
- entry->ptr = __pa(buf);
+ entry->ptr = nx842_get_pa((void *)buf);
nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
if (nextpage < buf + len) {
/* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_COMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
- op.out = __pa(slout.entries);
+ op.csbcpb = nx842_get_pa(csbcpb);
+ op.out = nx842_get_pa(slout.entries);
for (i = 0; i < hdr->blocks_nr; i++) {
/*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, max_sync_size, &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_DECOMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
+ op.csbcpb = nx842_get_pa(csbcpb);
/*
* max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
if (likely((inbuf & NX842_HW_PAGE_MASK) ==
((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = hdr->sizes[i];
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.out = __pa(outbuf);
+ op.out = nx842_get_pa((void *)outbuf);
op.outlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(outbuf, max_sync_size, &slout);
- op.out = __pa(slout.entries);
+ op.out = nx842_get_pa(slout.entries);
op.outlen = -nx842_get_scatterlist_size(&slout);
}
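The nx842_get_pa() helper added above exists because __pa() is only valid for linear-mapping addresses; vmalloc memory has to be translated page by page, with the in-page offset added back. A userspace toy showing just the page/offset arithmetic that offset_in_page() performs — the page size and the fake "physical" mapping are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Pretend translation of a page-aligned virtual page to a physical frame. */
static uint64_t fake_page_to_phys(uint64_t vpage)
{
	return vpage ^ 0x100000000ULL;   /* arbitrary mapping for the demo */
}

static uint64_t virt_to_phys_demo(uint64_t vaddr)
{
	uint64_t vpage  = vaddr & PAGE_MASK;        /* which page       */
	uint64_t offset = vaddr & (PAGE_SIZE - 1);  /* offset_in_page() */
	return fake_page_to_phys(vpage) + offset;
}

int main(void)
{
	uint64_t va = 0x7f0000012345ULL;
	printf("va=%#llx -> pa=%#llx\n",
	       (unsigned long long)va,
	       (unsigned long long)virt_to_phys_demo(va));
	return 0;
}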
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9bed1a2a67a1..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -346,6 +346,7 @@ config MOXART_DMA
tristate "MOXART DMA support"
depends on ARCH_MOXART
select DMA_ENGINE
+ select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b0..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
- old_chain_tail->async_tx.phys);
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+ &old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@ submit_done:
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
- char *hw_desc;
+ void *virt_desc;
+ dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ virt_desc = mv_chan->dma_desc_pool_virt;
+ slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ dma_desc = mv_chan->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int slot_cnt;
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %x src %x len: %u flags: %ld\n",
- __func__, dest, src, len, flags);
+ "%s dest: %pad src %pad len: %u flags: %ld\n",
+ __func__, &dest, &src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s src_cnt: %d len: dest %x %u flags: %ld\n",
- __func__, src_cnt, len, dest, flags);
+ "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
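The mv_xor cleanup replaces char-pointer casts with plain offset arithmetic on the virtual pool base and on the dma_addr_t handle, and prints DMA addresses via the %pa "pass the address of the variable" convention. A small standalone analogue of the per-slot address computation (the slot size and pool layout are invented; the cast to char * is used here for portable C, where the kernel relies on GCC's void * arithmetic):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOT_SIZE 64
#define NR_SLOTS  4

typedef uint64_t dma_addr_t;   /* opaque bus address, not a CPU pointer */

int main(void)
{
	void *pool_virt = malloc(NR_SLOTS * SLOT_SIZE);
	dma_addr_t pool_dma = 0x80000000ULL;   /* pretend bus address of the pool */

	if (!pool_virt)
		return 1;

	for (int idx = 0; idx < NR_SLOTS; idx++) {
		/* virtual address: ordinary pointer arithmetic               */
		void *hw_desc = (char *)pool_virt + idx * SLOT_SIZE;
		/* bus address: integer arithmetic, never cast via a pointer   */
		dma_addr_t phys = pool_dma + (dma_addr_t)idx * SLOT_SIZE;

		printf("slot %d: virt=%p dma=%#llx\n",
		       idx, hw_desc, (unsigned long long)phys);
	}
	free(pool_virt);
	return 0;
}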
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
*
* called with the mem_ctls_mutex held
*/
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ bool init)
{
edac_dbg(0, "\n");
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
if (mci->op_state != OP_RUNNING_POLL)
return;
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ if (init)
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
* user space has updated our poll period value, need to
* reset our workq delays
*/
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mc_workq_setup(mci, (unsigned long) value);
+ edac_mc_workq_setup(mci, value, false);
}
mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* This instance is NOW RUNNING */
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 8ec1747b1c39..b335c6ab5efe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
- long l;
+ unsigned long l;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtol(val, 0, &l);
+ ret = kstrtoul(val, 0, &l);
if (ret)
return ret;
- if (!l || ((int)l != l))
+
+ if (l < 1000)
return -EINVAL;
- *((int *)kp->arg) = l;
+
+ *((unsigned long *)kp->arg) = l;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(l);
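The sysfs handler now parses the poll period as an unsigned value and rejects anything below 1000 ms before propagating it to the workqueue. A standalone sketch of the same parse-and-validate step using strtoul; the 1000 ms floor mirrors the patch, while the function name and error handling here are only illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns 0 and stores the value on success, -EINVAL otherwise. */
static int set_poll_msec(const char *val, unsigned long *out)
{
	char *end;
	unsigned long l;

	if (!val)
		return -EINVAL;

	errno = 0;
	l = strtoul(val, &end, 0);
	if (errno || end == val || *end != '\0')
		return -EINVAL;

	if (l < 1000)           /* refuse sub-second polling */
		return -EINVAL;

	*out = l;
	return 0;
}

int main(void)
{
	unsigned long msec = 0;
	printf("\"500\"  -> %d\n", set_poll_msec("500", &msec));   /* rejected */
	printf("\"2000\" -> %d (msec=%lu)\n",
	       set_poll_msec("2000", &msec), msec);                /* accepted */
	return 0;
}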
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 697338772b64..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -403,6 +403,7 @@ config GPIO_GRGPIO
config GPIO_TB10X
bool
+ select GENERIC_IRQ_CHIP
select OF_GPIO
comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088ac59f..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
module_platform_driver(bcm_kona_gpio_driver);
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d3550274b8f7..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef5fab8..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
static int intel_gpio_runtime_idle(struct device *dev)
{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
+ int err = pm_schedule_suspend(dev, 500);
+ return err ?: -EBUSY;
}
static const struct dev_pm_ops intel_gpio_pm_ops = {
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136eceda62..7081304d6797 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
#error GPIO32 option is not enabled for your xtensa core variant
#endif
+#if XCHAL_HAVE_CP
+
static inline unsigned long enable_cp(unsigned long *cpenable)
{
unsigned long flags;
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
local_irq_restore(flags);
}
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+ *cpenable = 0; /* avoid uninitialized value warning */
+ return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
{
return 1; /* input only */
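The gpio-xtensa fix compiles the coprocessor enable/disable helpers only when the core actually has a coprocessor (XCHAL_HAVE_CP) and supplies no-op stubs otherwise, leaving the callers untouched. A generic sketch of that compile-time stub pattern; HAVE_FEATURE and the helper names are stand-ins, not the Xtensa HAL:

#include <stdio.h>

#define HAVE_FEATURE 0   /* flip to 1 to build the real helpers */

#if HAVE_FEATURE

static inline unsigned long feature_enable(unsigned long *saved)
{
	*saved = 1;        /* pretend we saved hardware state */
	return 0;          /* pretend irq flags               */
}

static inline void feature_disable(unsigned long flags, unsigned long saved)
{
	(void)flags;
	(void)saved;       /* pretend we restored the state   */
}

#else  /* !HAVE_FEATURE */

static inline unsigned long feature_enable(unsigned long *saved)
{
	*saved = 0;        /* avoid uninitialized-value warnings in callers */
	return 0;
}

static inline void feature_disable(unsigned long flags, unsigned long saved)
{
	(void)flags;
	(void)saved;
}

#endif

int main(void)
{
	unsigned long saved, flags;

	flags = feature_enable(&saved);   /* no-op when HAVE_FEATURE == 0 */
	feature_disable(flags, saved);
	printf("callers are identical either way\n");
	return 0;
}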
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index dffc836144cc..f4dc9b7a3831 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
+ case DRM_CAP_CURSOR_WIDTH:
+ if (dev->mode_config.cursor_width)
+ req->value = dev->mode_config.cursor_width;
+ else
+ req->value = 64;
+ break;
+ case DRM_CAP_CURSOR_HEIGHT:
+ if (dev->mode_config.cursor_height)
+ req->value = dev->mode_config.cursor_height;
+ else
+ req->value = 64;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
- depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS
help
Choose this option if you want to use IPP feature for DRM.
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "Exynos DRM GSC"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..215131ab1dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
ret = exynos_drm_subdrv_open(dev, file);
- if (ret) {
- kfree(file_priv);
- file->driver_priv = NULL;
- }
+ if (ret)
+ goto out;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
- kfree(file_priv);
- return PTR_ERR(anon_filp);
+ ret = PTR_ERR(anon_filp);
+ goto out;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
+out:
+ kfree(file_priv);
+ file->driver_priv = NULL;
+ return ret;
}
static void exynos_drm_preclose(struct drm_device *dev,
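The exynos open() rework above funnels both failure cases through a single "out:" label so the file_priv allocation is freed in exactly one place. The canonical shape of that goto-based cleanup style, as a standalone sketch with made-up step functions:

#include <stdio.h>
#include <stdlib.h>

static int step_a(void) { return 0; }    /* pretend sub-driver open */
static int step_b(void) { return -1; }   /* pretend anon file setup */

static char *g_priv;                     /* stands in for file->driver_priv */

static int open_demo(void)
{
	int ret;
	char *priv = malloc(64);

	if (!priv)
		return -1;

	ret = step_a();
	if (ret)
		goto out;

	ret = step_b();
	if (ret)
		goto out;

	g_priv = priv;   /* success: hand ownership to the "file" */
	return 0;

out:
	free(priv);      /* single cleanup point for every failure path */
	g_priv = NULL;
	return ret;
}

int main(void)
{
	printf("open_demo() = %d\n", open_demo());
	free(g_priv);
	return 0;
}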
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_NONE;
DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
break;
- };
+ }
return reg_type;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
- * quf == NULL condition means all event deletion.
+ * qbuf == NULL condition means all event deletion.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/hdmi.h>
#include <drm/exynos_drm.h>
@@ -59,19 +60,6 @@
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
- /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
- /* InfoFrame packet type */
- HDMI_PACKET_TYPE_INFOFRAME = 0x80,
- /* Vendor-Specific InfoFrame */
- HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
- /* Auxiliary Video information InfoFrame */
- HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
- /* Audio information InfoFrame */
- HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
};
-struct hdmi_infoframe {
- enum HDMI_PACKET_TYPE type;
- u8 ver;
- u8 len;
-};
-
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
}
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- struct hdmi_infoframe *infoframe)
+ union hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
return;
}
- switch (infoframe->type) {
- case HDMI_PACKET_TYPE_AVI:
+ switch (infoframe->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
/* Output format zero hardcoded, RGB/YCbCr selection */

hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
- case HDMI_PACKET_TYPE_AUI:
+ case HDMI_INFOFRAME_TYPE_AUDIO:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- struct hdmi_infoframe infoframe;
+ union hdmi_infoframe infoframe;
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.type = HDMI_PACKET_TYPE_AVI;
- infoframe.ver = HDMI_AVI_VERSION;
- infoframe.len = HDMI_AVI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+ infoframe.any.version = HDMI_AVI_VERSION;
+ infoframe.any.length = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
- infoframe.type = HDMI_PACKET_TYPE_AUI;
- infoframe.ver = HDMI_AUI_VERSION;
- infoframe.len = HDMI_AUI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+ infoframe.any.version = HDMI_AUI_VERSION;
+ infoframe.any.length = HDMI_AUI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
/* enable AVI packet every vsync, fixes purple line problem */
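The exynos_hdmi patch drops the driver-private infoframe struct in favour of the shared union hdmi_infoframe from <linux/hdmi.h>, reading the packet header through the common ->any member. A tiny standalone illustration of why a union whose members share a leading header can be accessed that way; the types below are simplified stand-ins, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

enum frame_type { FRAME_AVI = 0x82, FRAME_AUDIO = 0x84 };

struct any_header {
	uint8_t type;
	uint8_t version;
	uint8_t length;
};

struct avi_frame {
	uint8_t type;
	uint8_t version;
	uint8_t length;
	uint8_t colorspace;     /* AVI-specific payload field */
};

union infoframe {
	struct any_header any;  /* common leading header */
	struct avi_frame  avi;
};

static void write_header(const union infoframe *f)
{
	/* generic code only needs the shared header fields */
	unsigned hdr_sum = f->any.type + f->any.version + f->any.length;
	printf("type=%#x ver=%u len=%u hdr_sum=%u\n",
	       f->any.type, f->any.version, f->any.length, hdr_sum);
}

int main(void)
{
	union infoframe f = { .avi = {
		.type = FRAME_AVI, .version = 2, .length = 13, .colorspace = 0,
	} };
	write_header(&f);
	return 0;
}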
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..faa77f543a07 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
{
uint8_t buf[PB(5) + 1];
+ memset(buf, 0, sizeof(buf));
buf[HB(0)] = 0x84;
buf[HB(1)] = 0x01;
buf[HB(2)] = 10;
- buf[PB(0)] = 0;
buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
div = 148500 / mode->clock;
+ if (div != 0) {
+ div--;
+ if (div > 3)
+ div = 3;
+ }
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
- reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ reg_write(encoder, REG_ENABLE_SPACE, 0x00);
}
/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
drm_i2c_encoder_destroy(encoder);
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
kfree(priv);
}
@@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client,
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
- priv->current_page = 0;
+ priv->current_page = 0xff;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ if (!priv->cec) {
+ kfree(priv);
+ return -ENODEV;
+ }
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a2bf8e3f739..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7fd2fd2f0a5..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
va_list tmp;
va_copy(tmp, args);
- if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+ len = vsnprintf(NULL, 0, f, tmp);
+ va_end(tmp);
+
+ if (!__i915_error_seek(e, len))
return;
}
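The i915_error_vprintf fix measures the formatted length into a local and, crucially, calls va_end() on the va_copy'd list before using the result. A self-contained version of that measure-then-act pattern:

#include <stdarg.h>
#include <stdio.h>

/* Return the number of characters fmt/args would produce, without printing. */
static int vformat_len(const char *fmt, va_list args)
{
	va_list tmp;
	int len;

	va_copy(tmp, args);            /* keep the caller's list usable */
	len = vsnprintf(NULL, 0, fmt, tmp);
	va_end(tmp);                   /* every va_copy needs a va_end  */

	return len;
}

static void report(const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vformat_len(fmt, args);  /* args is still valid afterwards */
	printf("message would be %d bytes: ", len);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	report("ring %d hung at seqno %u\n", 0, 1234u);
	return 0;
}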
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d8fcb1b6f7..9fec71175571 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
} else {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder) pipe;
u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9fa24347963a..4c1672809493 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
if (ring->id == RCS)
len += 6;
+ /*
+ * BSpec MI_DISPLAY_FLIP for IVB:
+ * "The full packet must be contained within the same cache line."
+ *
+ * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+ * cacheline, if we ever start emitting more commands before
+ * the MI_DISPLAY_FLIP we may need to first emit everything else,
+ * then do the cacheline alignment, and finally emit the
+ * MI_DISPLAY_FLIP.
+ */
+ ret = intel_ring_cacheline_align(ring);
+ if (ret)
+ goto err_unpin;
+
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ede4e8e290d..57552eb386b0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
- bool has_aux_irq = true;
+ bool has_aux_irq = HAS_AUX_IRQ(dev);
uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
+ int retry;
if (WARN_ON(send_bytes > 16))
return -E2BIG;
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
- break;
+ return send_bytes;
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ usleep_range(400, 500);
else
return -EIO;
}
- return send_bytes;
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
/* Write a single byte to the aux channel in native mode */
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
int reply_bytes;
uint8_t ack;
int ret;
+ int retry;
if (WARN_ON(recv_bytes > 19))
return -E2BIG;
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
return ret - 1;
}
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ usleep_range(400, 500);
else
return -EIO;
}
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
static int
@@ -1869,10 +1876,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
intel_enable_dp(encoder);
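Both AUX helpers above now retry a deferred transfer at most seven times, sleeping roughly 400-500 us between attempts, instead of looping forever. A generic standalone sketch of that bounded retry-on-defer loop; the transfer function and reply codes are invented for the example:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

enum reply { REPLY_ACK, REPLY_DEFER, REPLY_NACK };

/* Pretend hardware that defers the first two attempts, then ACKs. */
static enum reply do_transfer(void)
{
	static int calls;
	return ++calls < 3 ? REPLY_DEFER : REPLY_ACK;
}

static int transfer_with_retries(void)
{
	for (int retry = 0; retry < 7; retry++) {
		switch (do_transfer()) {
		case REPLY_ACK:
			return 0;                  /* done                   */
		case REPLY_DEFER:
			usleep(450);               /* back off, then retry   */
			break;
		case REPLY_NACK:
			return -EIO;               /* hard failure, no retry */
		}
	}
	fprintf(stderr, "too many retries, giving up\n");
	return -EIO;
}

int main(void)
{
	printf("transfer: %d\n", transfer_with_retries());
	return 0;
}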
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b1dc33f47899..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4e960ec7419f..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -226,6 +226,8 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#define MAX_DSLP 1500
+
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
- } else if (dslp > 500) {
+ } else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
- WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
- dslp = 500;
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
}
/* The spec tells us to do this, but we are the only user... */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b7f1742caf87..31b36c5ac894 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
return 0;
}
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+ int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+ int ret;
+
+ if (num_dwords == 0)
+ return 0;
+
+ ret = intel_ring_begin(ring, num_dwords);
+ if (ret)
+ return ret;
+
+ while (num_dwords--)
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
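intel_ring_cacheline_align() pads the ring tail with MI_NOOPs so the next packet starts on a 64-byte boundary, which the MI_DISPLAY_FLIP restriction quoted earlier requires. The padding arithmetic as a runnable demo; the 64-byte line and 4-byte dword match the patch, the "ring" is just an array, and unlike the kernel helper this variant inserts nothing when the tail is already aligned:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE  64u
#define DWORD      ((unsigned)sizeof(uint32_t))
#define MI_NOOP    0u

int main(void)
{
	uint32_t ring[256] = { 0 };
	unsigned tail = 9 * DWORD;   /* pretend 9 dwords already emitted */

	/* dwords needed to reach the next 64-byte boundary (0 if aligned) */
	unsigned pad = ((CACHELINE - (tail & (CACHELINE - 1))) % CACHELINE) / DWORD;

	for (unsigned i = 0; i < pad; i++) {
		ring[tail / DWORD] = MI_NOOP;
		tail += DWORD;
	}

	printf("padded with %u MI_NOOPs, tail now %u (aligned: %s)\n",
	       pad, tail, (tail % CACHELINE) ? "no" : "yes");
	return 0;
}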
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4fe252..0b243ce33714 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
spinlock_t lock;
bool stale;
uint32_t width, height;
+ uint32_t x, y;
/* next cursor to scan-out: */
uint32_t next_iova;
@@ -57,9 +58,16 @@ struct mdp4_crtc {
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we currently hold a scanout ref to: */
+ /* the fb that we logically (from the PoV of the KMS API) hold a ref
+ * to, which we may not yet be scanning out (we may still be scanning
+ * out the previous fb in the case of a page_flip while waiting
+ * for gpu rendering to complete):
+ */
struct drm_framebuffer *fb;
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void update_fb(struct drm_crtc *crtc, bool async,
- struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp4_crtc->base.fb = new_fb;
mdp4_crtc->fb = new_fb;
- if (!async) {
- /* enable vblank to pick up the old_fb */
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
- }
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this. Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp4_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+ mdp4_crtc->scanout_fb);
+
+ mdp4_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
- }
- flush |= ovlp2flush(mdp4_crtc->ovlp);
-
- DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
- mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- atomic_or(pending, &mdp4_crtc->pending);
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-}
-
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
if (!fb)
return;
+ drm_framebuffer_reference(fb);
mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- crtc_flush(crtc);
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
- update_fb(crtc, false, crtc->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
if (dma == DMA_E) {
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
return 0;
}
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane = mdp4_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
+ int ret;
- update_fb(crtc, false, crtc->fb);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
- return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
}
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, true, new_fb);
+ update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
static void update_cursor(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
}
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference_unlocked(old_bo);
}
+ crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
@@ -542,12 +591,15 @@ fail:
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
- MDP4_DMA_CURSOR_POS_X(x) |
- MDP4_DMA_CURSOR_POS_Y(y));
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
return 0;
}
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
+ mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 2406027200ec..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
- MDP4_PIPE_SRC_XY_X(crtc_x) |
- MDP4_PIPE_SRC_XY_Y(crtc_y));
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
mdp4_plane_set_scanout(plane, fb);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 71a3b2345eb3..f2794021f086 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp5_crtc->name, ret);
return ret;
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
update_fb(crtc, crtc->fb);
update_scanout(crtc, crtc->fb);
- return ret;
+ return 0;
}
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d60c969ac7..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
fail:
if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova);
/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably
* want to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr(&obj->base);
+ ptr = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
unsigned i;
- mutex_lock(&submit->dev->struct_mutex);
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
- mutex_unlock(&submit->dev->struct_mutex);
ww_acquire_fini(&submit->ticket);
kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (submit)
submit_cleanup(submit, !!ret);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4ebce8be489d..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_drm_private *priv = dev->dev_private;
int i, ret;
- mutex_lock(&dev->struct_mutex);
-
submit->fence = ++priv->next_fence;
gpu->submitted_fence = submit->fence;
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
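The msm submit path now takes struct_mutex once in the ioctl and calls _locked variants of the helpers, rather than each helper locking and unlocking around its own piece. A pthread sketch of that "lock at the boundary, suffix the helpers" convention; the names and counters are illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int pinned, submitted;

/* _locked helpers assume the caller already holds state_lock. */
static void pin_locked(void)    { pinned++; }
static void submit_locked(void) { submitted++; }

/* Single entry point: one lock/unlock pair covers the whole operation,
 * so pinning and submission cannot interleave with another thread. */
static void gem_submit_ioctl(void)
{
	pthread_mutex_lock(&state_lock);
	pin_locked();
	submit_locked();
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	gem_submit_ioctl();
	printf("pinned=%d submitted=%d\n", pinned, submitted);
	return 0;
}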
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e88145ba1bf5..d310c195bdfe 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv4c.o
nouveau-y += core/subdev/mc/nv50.o
nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1b653dd74a70..08b88591ed60 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
@@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 940eaa5d8b9a..9ad722e4e087 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
if (conf != ~0) {
if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
u32 soff = (ffs(outp.or) - 1) * 0x08;
- u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+ u32 ctrl = nv_rd32(priv, 0x610794 + soff);
u32 datarate;
switch ((ctrl & 0x000f0000) >> 16) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 9a850fe19515..54c1b5b471cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
- if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
nv_error(priv, "runlist %d update timeout\n", engine);
mutex_unlock(&nv_subdev(priv)->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 30ed19c52e05..7a367c402978 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
ustatus &= ~0x04030000;
}
if (ustatus && display) {
- nv_error("%s - TP%d:", name, i);
+ nv_error(priv, "%s - TP%d:", name, i);
nouveau_bitfield_print(nv50_mpc_traps, ustatus);
pr_cont("\n");
ustatus = 0;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index adc88b73d911..3c6738edd127 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -47,6 +47,7 @@ struct nouveau_mc_oclass {
extern struct nouveau_oclass *nv04_mc_oclass;
extern struct nouveau_oclass *nv40_mc_oclass;
extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv4c_mc_oclass;
extern struct nouveau_oclass *nv50_mc_oclass;
extern struct nouveau_oclass *nv94_mc_oclass;
extern struct nouveau_oclass *nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index aa0fbbec7f08..ef0c9c4a8cc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
u16 pcir;
int i;
+ /* there is no prom on nv4x IGP's */
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ return;
+
/* enable access to rom */
if (device->card_type >= NV_50)
pcireg = 0x088050;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 9159a5ccee93..265d1253624a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) {
.fini = _nouveau_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv10_ram_oclass,
+ .base.ram = &nv1a_ram_oclass,
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index b0d5c31606c1..81a408e7d034 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
extern const struct nouveau_mc_intr nv04_mc_intr[];
int nv04_mc_init(struct nouveau_object *);
void nv40_mc_msi_rearm(struct nouveau_mc *);
+int nv44_mc_init(struct nouveau_object *object);
int nv50_mc_init(struct nouveau_object *);
extern const struct nouveau_mc_intr nv50_mc_intr[];
extern const struct nouveau_mc_intr nvc0_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 3bfee5c6c4f2..cc4d0d2d886e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -24,7 +24,7 @@
#include "nv04.h"
-static int
+int
nv44_mc_init(struct nouveau_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
new file mode 100644
index 000000000000..a75c35ccf25c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+
+#include "nv04.h"
+
+static void
+nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+ struct nv04_mc_priv *priv = (void *)pmc;
+ nv_wr08(priv, 0x088050, 0xff);
+}
+
+struct nouveau_oclass *
+nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x4c),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv44_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nv04_mc_intr,
+ .msi_rearm = nv4c_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 488686d490c0..4aed1714b9ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- if (!node->memtype)
+ if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 78c8e7146d56..89c484d8ac26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail_device;
+ dev->irq_enabled = true;
+
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
@@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_object *device;
+ dev->irq_enabled = false;
device = drm->client.base.device;
drm_put_dev(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 81638d7f2eff..471347edc27e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state)
{
struct nouveau_device *device = nouveau_dev(priv);
- if (device->chipset >= 0x40)
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ nv_wr32(device, 0x088060, state);
+ else if (device->chipset >= 0x40)
nv_wr32(device, 0x088054, state);
else
nv_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a9338c85630f..0d19f4f94d5a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
- int bpc = radeon_get_monitor_bpc(connector);
+ int bpc = radeon_crtc->bpc;
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
/* reset the pll flags */
@@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
/* Set NUM_BANKS. */
- if (rdev->family >= CHIP_BONAIRE) {
+ if (rdev->family >= CHIP_TAHITI) {
unsigned tileb, index, num_banks, tile_split_bytes;
/* Calculate the macrotile mode index. */
@@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ if (rdev->family >= CHIP_BONAIRE)
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ else
+ num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
- /* SI and older. */
- if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
+ /* NI and older. */
+ if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
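
[Editor's aside, not part of the patch: after this hunk the bank count is taken from the family-specific tiling table — bits 7:6 of macrotile_mode_array[index] on CIK parts (CHIP_BONAIRE and newer) and bits 21:20 of tile_mode_array[index] on SI parts (CHIP_TAHITI and newer). A hypothetical helper sketching just that field selection:]

/* Hypothetical helper mirroring the field extraction above; not driver code. */
static unsigned int num_banks_field(int family_is_cik,
                                    unsigned int macrotile_word,
                                    unsigned int tile_mode_word)
{
        return family_is_cik ? (macrotile_word >> 6) & 0x3    /* CIK */
                             : (tile_mode_word >> 20) & 0x3;  /* SI  */
}
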
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a42d61571f49..2cec2ab02f80 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
- if (connector)
- bpc = radeon_get_monitor_bpc(connector);
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
switch (bpc) {
case 0:
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0fbd36f3d4e9..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
#include "cypress_dpm.h"
#include "btc_dpm.h"
#include "atom.h"
+#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ if (rdev->family >= CHIP_CEDAR) {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ } else {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc);
+ }
+ }
+}
+
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
+# define CURRENT_PROFILE_INDEX_SHIFT 4
+
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f2b9e21ce4da..5623e7542d99 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
- break;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b6e01d5d2cce..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
int kv_dpm_late_enable(struct radeon_device *rdev)
{
- int ret;
+ int ret = 0;
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c351226ecb31..ca814276b075 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *ps = ni_get_ps(rps);
- u16 vddc;
struct rv7xx_pl *pl = &ps->performance_levels[index];
ps->performance_level_count = index + 1;
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56140b4e5bb2..cdbc4171fe73 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3991,6 +3991,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a8ac1cd6b4c..024db37b1832 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -135,6 +135,9 @@ extern int radeon_hard_reset;
/* R600+ */
#define R600_RING_TYPE_UVD_INDEX 5
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
/*
* Semaphores.
*/
-/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f74db43346fd..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = {
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+ .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b3bc433eed4c..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
int sumo_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d680608f6f5b..fbd8b930f2be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_crtc->max_cursor_width = CURSOR_WIDTH;
radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
}
+ dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+ dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1b783f0e6d3a..15e44a7281ab 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 2b42aa1914f2..9006b32d5eed 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,14 +34,15 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
+ uint32_t *cpu_addr;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
- &(*semaphore)->sa_bo, 8, 8, true);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
+ 8 * RADEON_NUM_SYNCS, 8, true);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
- *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ cpu_addr[i] = 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
(*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int ring)
{
+ unsigned count = 0;
int i, r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
return -EINVAL;
}
+ if (++count > RADEON_NUM_SYNCS) {
+ /* not enough room, wait manually */
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
/* allocate enough space for sync command */
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
if (r) {
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
radeon_ring_commit(rdev, &rdev->ring[i]);
radeon_fence_note_sync(fence, ring);
+
+ semaphore->gpu_addr += 8;
}
return 0;
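
[Editor's aside, not part of the patch: the semaphore object above is now backed by RADEON_NUM_SYNCS (4) eight-byte slots rather than a single one — each ring that needs a sync consumes one slot (gpu_addr advances by 8), and once the slots are exhausted the code falls back to a blocking radeon_fence_wait_locked(). A minimal, hypothetical illustration of that allocate-or-block pattern; the names below are not the radeon API:]

/* Hypothetical sketch: a fixed pool of 8-byte sync slots with a CPU-wait
 * fallback when the pool runs dry. */
#define NUM_SYNC_SLOTS 4

struct sync_slots {
        unsigned long long base;   /* GPU address of slot 0 */
        unsigned int used;         /* slots handed out so far */
};

/* Returns the GPU address of the next free slot, or 0 when the caller
 * should wait for the fence on the CPU instead of emitting a semaphore. */
static unsigned long long next_sync_slot(struct sync_slots *s)
{
        if (s->used >= NUM_SYNC_SLOTS)
                return 0;
        return s->base + 8ull * s->used++;
}
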
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 80c595aba359..b5f63f5e22a3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct rv7xx_ps *ps = rv770_get_ps(rps);
u32 sclk, mclk;
- u16 vddc;
struct rv7xx_pl *pl;
switch (index) {
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = 300;
-
- /* quirks */
- /* ASUS K70AF */
- if ((rdev->pdev->device == 0x9553) &&
- (rdev->pdev->subsystem_vendor == 0x1043) &&
- (rdev->pdev->subsystem_device == 0x1c42))
- switch_limit = 200;
+ u32 switch_limit = 200; /* 300 */
/* RV770 */
/* mclk switching doesn't seem to work reliably on desktop RV770s */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 09ec4f6c53bb..83578324e5d1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6338,6 +6338,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0471501338fb..0a2f5b4bca43 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f121efe12dc5..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct radeon_ps *rps = &pi->current_rps;
struct sumo_ps *ps = sumo_get_ps(rps);
struct sumo_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2d447192d6f7..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
struct trinity_ps *ps = trinity_get_ps(rps);
struct trinity_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 824550db3fed..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
- return;
}
/**
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 3302f99e7497..764be36397fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->ttm.func = &ttm_agp_func;
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(agp_be);
return NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index b645647b7776..bb594c11605e 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -1223,9 +1223,19 @@ typedef enum {
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
-
+#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
+#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
+#define SVGA_3D_CMD_GB_MOB_FENCE 1133
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
+#define SVGA_3D_CMD_NOP_ERROR 1137
+
+#define SVGA_3D_CMD_RESERVED1 1138
+#define SVGA_3D_CMD_RESERVED2 1139
+#define SVGA_3D_CMD_RESERVED3 1140
+#define SVGA_3D_CMD_RESERVED4 1141
+#define SVGA_3D_CMD_RESERVED5 1142
#define SVGA_3D_CMD_MAX 1142
#define SVGA_3D_CMD_FUTURE_MAX 3000
@@ -1973,8 +1983,7 @@ struct {
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
typedef
@@ -1984,15 +1993,13 @@ struct {
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
typedef
struct {
SVGAOTableType type;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
/*
@@ -2005,8 +2012,7 @@ struct SVGA3dCmdDefineGBMob {
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
@@ -2017,8 +2023,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
typedef
struct SVGA3dCmdDestroyGBMob {
SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
/*
@@ -2031,8 +2036,7 @@ struct SVGA3dCmdRedefineGBMob {
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
/*
@@ -2045,8 +2049,7 @@ struct SVGA3dCmdDefineGBMob64 {
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
/*
@@ -2059,8 +2062,7 @@ struct SVGA3dCmdRedefineGBMob64 {
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
/*
@@ -2070,8 +2072,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
typedef
struct SVGA3dCmdUpdateGBMobMapping {
SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
/*
@@ -2087,7 +2088,8 @@ struct SVGA3dCmdDefineGBSurface {
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
-} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+} __packed
+SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
/*
* Destroy a guest-backed surface.
@@ -2096,7 +2098,8 @@ struct SVGA3dCmdDefineGBSurface {
typedef
struct SVGA3dCmdDestroyGBSurface {
uint32 sid;
-} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+} __packed
+SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
/*
* Bind a guest-backed surface to an object.
@@ -2106,7 +2109,8 @@ typedef
struct SVGA3dCmdBindGBSurface {
uint32 sid;
SVGAMobId mobid;
-} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+} __packed
+SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
/*
* Conditionally bind a mob to a guest backed surface if testMobid
@@ -2123,7 +2127,7 @@ struct{
SVGAMobId testMobid;
SVGAMobId mobid;
uint32 flags;
-}
+} __packed
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
/*
@@ -2135,7 +2139,8 @@ typedef
struct SVGA3dCmdUpdateGBImage {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
-} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
/*
* Update an entire guest-backed surface.
@@ -2145,7 +2150,8 @@ struct SVGA3dCmdUpdateGBImage {
typedef
struct SVGA3dCmdUpdateGBSurface {
uint32 sid;
-} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
/*
* Readback an image in a guest-backed surface.
@@ -2155,7 +2161,8 @@ struct SVGA3dCmdUpdateGBSurface {
typedef
struct SVGA3dCmdReadbackGBImage {
SVGA3dSurfaceImageId image;
-} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+} __packed
+SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
/*
* Readback an entire guest-backed surface.
@@ -2165,7 +2172,8 @@ struct SVGA3dCmdReadbackGBImage {
typedef
struct SVGA3dCmdReadbackGBSurface {
uint32 sid;
-} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+} __packed
+SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
/*
* Readback a sub rect of an image in a guest-backed surface. After
@@ -2179,7 +2187,7 @@ struct SVGA3dCmdReadbackGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
-}
+} __packed
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
/*
@@ -2190,7 +2198,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
typedef
struct SVGA3dCmdInvalidateGBImage {
SVGA3dSurfaceImageId image;
-} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
/*
* Invalidate an entire guest-backed surface.
@@ -2200,7 +2209,8 @@ struct SVGA3dCmdInvalidateGBImage {
typedef
struct SVGA3dCmdInvalidateGBSurface {
uint32 sid;
-} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
/*
* Invalidate a sub rect of an image in a guest-backed surface. After
@@ -2214,7 +2224,7 @@ struct SVGA3dCmdInvalidateGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
-}
+} __packed
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
/*
@@ -2224,7 +2234,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
typedef
struct SVGA3dCmdDefineGBContext {
uint32 cid;
-} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+} __packed
+SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
/*
* Destroy a guest-backed context.
@@ -2233,7 +2244,8 @@ struct SVGA3dCmdDefineGBContext {
typedef
struct SVGA3dCmdDestroyGBContext {
uint32 cid;
-} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+} __packed
+SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
/*
* Bind a guest-backed context.
@@ -2252,7 +2264,8 @@ struct SVGA3dCmdBindGBContext {
uint32 cid;
SVGAMobId mobid;
uint32 validContents;
-} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+} __packed
+SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
/*
* Readback a guest-backed context.
@@ -2262,7 +2275,8 @@ struct SVGA3dCmdBindGBContext {
typedef
struct SVGA3dCmdReadbackGBContext {
uint32 cid;
-} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+} __packed
+SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
/*
* Invalidate a guest-backed context.
@@ -2270,7 +2284,8 @@ struct SVGA3dCmdReadbackGBContext {
typedef
struct SVGA3dCmdInvalidateGBContext {
uint32 cid;
-} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+} __packed
+SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
/*
* Define a guest-backed shader.
@@ -2281,7 +2296,8 @@ struct SVGA3dCmdDefineGBShader {
uint32 shid;
SVGA3dShaderType type;
uint32 sizeInBytes;
-} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+} __packed
+SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
/*
* Bind a guest-backed shader.
@@ -2291,7 +2307,8 @@ typedef struct SVGA3dCmdBindGBShader {
uint32 shid;
SVGAMobId mobid;
uint32 offsetInBytes;
-} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+} __packed
+SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
/*
* Destroy a guest-backed shader.
@@ -2299,7 +2316,8 @@ typedef struct SVGA3dCmdBindGBShader {
typedef struct SVGA3dCmdDestroyGBShader {
uint32 shid;
-} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+} __packed
+SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
typedef
struct {
@@ -2314,14 +2332,16 @@ struct {
* Note that FLOAT and INT constants are 4-dwords in length, while
* BOOL constants are 1-dword in length.
*/
-} SVGA3dCmdSetGBShaderConstInline;
+} __packed
+SVGA3dCmdSetGBShaderConstInline;
/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
typedef
struct {
uint32 cid;
SVGA3dQueryType type;
-} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+} __packed
+SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
typedef
struct {
@@ -2329,7 +2349,8 @@ struct {
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
-} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+} __packed
+SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
/*
@@ -2346,21 +2367,22 @@ struct {
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
-} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+} __packed
+SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
typedef
struct {
SVGAMobId mobid;
uint32 fbOffset;
uint32 initalized;
-}
+} __packed
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
typedef
struct {
SVGAMobId mobid;
uint32 gartOffset;
-}
+} __packed
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
@@ -2368,7 +2390,7 @@ typedef
struct {
uint32 gartOffset;
uint32 numPages;
-}
+} __packed
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
@@ -2385,27 +2407,27 @@ struct {
int32 xRoot;
int32 yRoot;
uint32 flags;
-}
+} __packed
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
-}
+} __packed
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
SVGA3dSurfaceImageId image;
-}
+} __packed
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
SVGA3dBox box;
-}
+} __packed
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
/*
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
index 8369c3ba10fe..ef3385096145 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -38,8 +38,11 @@
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
+#define u64 uint64_t
+#define U32_MAX ((u32)~0U)
#endif /* __KERNEL__ */
@@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
static inline u32 clamped_umul32(u32 a, u32 b)
{
- uint64_t tmp = (uint64_t) a*b;
- return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+ u64 tmp = (u64) a*b;
+ return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
}
static inline const struct svga3d_surface_desc *
@@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
bool cubemap)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- u32 total_size = 0;
+ u64 total_size = 0;
u32 mip;
for (mip = 0; mip < num_mip_levels; mip++) {
@@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
if (cubemap)
total_size *= SVGA3D_MAX_SURFACE_FACES;
- return total_size;
+ return (u32) min_t(u64, total_size, (u64) U32_MAX);
}
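
[Editor's aside, not part of the patch: the change above hardens a 32-bit size computation against overflow by widening to 64 bits — clamped_umul32() saturates a*b at U32_MAX, and the per-mip accumulation now runs in a u64 before being clamped back down. A standalone version of the same saturating multiply, with hypothetical names:]

/* Hypothetical standalone form of the saturating multiply used above.
 * e.g. sat_umul32(0x10000, 0x10000) returns 0xffffffff instead of
 * wrapping to 0. */
#include <stdint.h>

static inline uint32_t sat_umul32(uint32_t a, uint32_t b)
{
        uint64_t tmp = (uint64_t)a * b;
        return (tmp > (uint64_t)UINT32_MAX) ? UINT32_MAX : (uint32_t)tmp;
}
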
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 71defa4d2d75..11323dd5196f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,10 +169,17 @@ enum {
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
+ SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
+ SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
- SVGA_REG_TOP = 53, /* Must be 1 more than the last register */
+ SVGA_REG_CMD_PREPEND_LOW = 53,
+ SVGA_REG_CMD_PREPEND_HIGH = 54,
+ SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+ SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+ SVGA_REG_MOB_MAX_SIZE = 57,
+ SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 9426c53fb483..1e80152674b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -551,8 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.shader_type;
- cmd->body.shid =
- cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -585,8 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid =
- cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -628,8 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = bi->i1.texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value =
- cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 3bdc0adc656d..0083cbf99edf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->memory_size = 512*1024*1024;
}
dev_priv->max_mob_pages = 0;
+ dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint64_t mem_size =
vmw_read(dev_priv,
@@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+ dev_priv->max_mob_size =
+ vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
} else
dev_priv->prim_bb_mem = dev_priv->vram_size;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index ecaa302a6154..9e4be1725985 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -386,6 +386,7 @@ struct vmw_private {
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
+ uint32_t max_mob_size;
uint32_t memory_size;
bool has_gmr;
bool has_mob;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 269b85cc875a..efb575a7996c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -602,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
- __le32 cid;
+ uint32_t cid;
} *cmd;
cmd = container_of(header, struct vmw_cid_cmd, header);
@@ -1835,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
return 0;
}
-static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
@@ -2032,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
goto out_invalid;
entry = &vmw_cmd_entries[cmd_id];
+ if (unlikely(!entry->func))
+ goto out_invalid;
+
if (unlikely(!entry->user_allow && !sw_context->kernel))
goto out_privileged;
@@ -2469,7 +2472,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (dev_priv->has_mob) {
ret = vmw_rebind_contexts(sw_context);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_unlock_binding;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index f9881f9e62bd..47b70949bf3a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -102,6 +102,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
vmw_fp->gb_aware = true;
param->value = dev_priv->max_mob_pages * PAGE_SIZE;
break;
+ case DRM_VMW_PARAM_MAX_MOB_SIZE:
+ param->value = dev_priv->max_mob_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 217d941b8176..ee3856578a12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -371,13 +371,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-int vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
- size_t shader_size,
- size_t offset,
- SVGA3dShaderType shader_type,
- struct ttm_object_file *tfile,
- u32 *handle)
+static int vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
@@ -779,6 +779,8 @@ vmw_compat_shader_man_create(struct vmw_private *dev_priv)
int ret;
man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (man == NULL)
+ return ERR_PTR(-ENOMEM);
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 497558127bb3..f822fd2a1ada 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3bfac3accd22..cc32a6f96c64 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1679,6 +1679,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -1779,6 +1780,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 8fae6d1414cc..c24908f14934 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -157,6 +157,7 @@ struct mousevsc_dev {
u32 report_desc_size;
struct hv_input_dev_info hid_dev_info;
struct hid_device *hid_device;
+ u8 input_buf[HID_MAX_BUFFER_SIZE];
};
@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device,
struct synthhid_msg *hid_msg;
struct mousevsc_dev *input_dev = hv_get_drvdata(device);
struct synthhid_input_report *input_report;
+ size_t len;
pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
(packet->offset8 << 3));
@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device,
(struct synthhid_input_report *)pipe_msg->data;
if (!input_dev->init_complete)
break;
- hid_input_report(input_dev->hid_device,
- HID_INPUT_REPORT, input_report->buffer,
- input_report->header.size, 1);
+
+ len = min(input_report->header.size,
+ (u32)sizeof(input_dev->input_buf));
+ memcpy(input_dev->input_buf, input_report->buffer, len);
+ hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
+ input_dev->input_buf, len, 1);
break;
default:
pr_err("unsupported hid msg type - type %d len %d",
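
[Editor's aside, not part of the patch: the hunk above stops handing the host-supplied buffer and size straight to hid_input_report(); it copies at most sizeof(input_buf) bytes into a driver-owned, fixed-size buffer, so an oversized size field from the host can no longer overrun the buffer handed to HID core. The general bounded-copy shape, with hypothetical names and a stand-in buffer size:]

/* Hypothetical sketch of clamping an untrusted length before copying. */
#include <stddef.h>
#include <string.h>

#define REPORT_BUF_SIZE 64   /* stand-in for HID_MAX_BUFFER_SIZE */

static size_t copy_report(unsigned char *dst, const unsigned char *src,
                          unsigned int claimed_len)
{
        /* never trust claimed_len further than the destination can hold */
        size_t len = claimed_len < REPORT_BUF_SIZE ? claimed_len
                                                   : REPORT_BUF_SIZE;
        memcpy(dst, src, len);
        return len;
}
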
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a5248f2cc07..22f28d6b33a8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,6 +135,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -240,6 +241,7 @@
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
+#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
@@ -451,6 +453,9 @@
#define USB_VENDOR_ID_INTEL_1 0x8087
#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa
+#define USB_VENDOR_ID_STM_0 0x0483
+#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1
+
#define USB_VENDOR_ID_ION 0x15e4
#define USB_DEVICE_ID_ICADE 0x0132
@@ -619,6 +624,8 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
+#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
+#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -644,6 +651,7 @@
#define USB_VENDOR_ID_NEXIO 0x1870
#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
+#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d50e7313b171..a713e6211419 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1178,7 +1178,7 @@ static void hidinput_led_worker(struct work_struct *work)
/* fall back to generic raw-output-report */
len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- buf = kmalloc(len, GFP_KERNEL);
+ buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
return;
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6ef6eed3091..404a3a8a82f1 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -208,6 +208,10 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
+ .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
+ .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f134d73beca1..221d503f1c24 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1166,6 +1166,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
USB_DEVICE_ID_MULTITOUCH_3200) },
+ /* FocalTech Panels */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
+ USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) },
+
/* GeneralTouch panel */
{ .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 46f4480035bc..9c22e14c57f0 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -665,6 +665,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
USB_DEVICE_ID_INTEL_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
+ USB_DEVICE_ID_STM_HID_SENSOR),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d1f81f52481a..42eebd14de1f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -582,7 +582,7 @@ static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep,
int ret;
int len = i2c_hid_get_report_length(rep) - 2;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = hid_alloc_report_buf(rep, GFP_KERNEL);
if (!buf)
return;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 175ec0afb70c..dbd83878ff99 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9b1936..f2d7bf90c9fe 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- int t;
init_completion(&msginfo->waitevent);
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ if (version == VERSION_WIN8)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
/*
* Add to list before we send the request since we may
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
}
/* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- return -ETIMEDOUT;
- }
+ wait_for_completion(&msginfo->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c23203915af..8a17f01e8672 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@ struct ntc_data {
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
- unsigned int result;
+ s64 result;
int val, ret;
ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
}
/* unit: mV */
- result = pdata->pullup_uv * val;
+ result = pdata->pullup_uv * (s64) val;
result >>= 12;
- return result;
+ return (int)result;
}
static const struct of_device_id ntc_match[] = {
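
[Editor's aside, not part of the patch: pullup_uv * val is widened to s64 above because the 32-bit product can overflow — a 5 V pull-up (5,000,000 µV) times a full-scale 12-bit reading of 4095 is about 2.05e10, well past the ~4.29e9 a u32 can hold; the 64-bit product is shifted right by the ADC resolution and only then narrowed back to int. A hypothetical spot-check of that arithmetic:]

/* Hypothetical reproduction of the widened multiply:
 * 5,000,000 * 4095 = 20,475,000,000, which wraps in 32 bits but fits in 64. */
#include <stdint.h>

static int scale_reading(uint32_t pullup_uv, int raw, unsigned int adc_bits)
{
        int64_t result = (int64_t)pullup_uv * raw;   /* widen before multiplying */
        return (int)(result >> adc_bits);            /* e.g. adc_bits = 12 */
}
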
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b8c5187b9ee0..d52d84937ad3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -97,7 +97,6 @@ enum {
enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
- MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
MV64XXX_I2C_ACTION_SEND_START,
MV64XXX_I2C_ACTION_SEND_RESTART,
MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
unsigned long ctrl_reg;
struct i2c_msg *msg = drv_data->msgs;
+ if (!drv_data->offload_enabled)
+ return -EOPNOTSUPP;
+
drv_data->msg = msg;
drv_data->byte_posn = 0;
drv_data->bytes_left = msg->len;
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->msgs++;
drv_data->num_msgs--;
- if (!(drv_data->offload_enabled &&
- mv64xxx_i2c_offload_msg(drv_data))) {
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
writel(drv_data->cntl_bits,
drv_data->reg_base + drv_data->reg_offsets.control);
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base + drv_data->reg_offsets.control);
break;
- case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
- if (!mv64xxx_i2c_offload_msg(drv_data))
- break;
- else
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_START:
- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
- drv_data->reg_base + drv_data->reg_offsets.control);
+ /* Can we offload this msg ? */
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+ /* No, switch to standard path */
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+ }
break;
case MV64XXX_I2C_ACTION_SEND_ADDR_1:
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
- if (drv_data->offload_enabled) {
- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- } else {
- mv64xxx_i2c_prepare_for_io(drv_data, msg);
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- }
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+
drv_data->send_stop = is_last;
drv_data->block = 1;
mv64xxx_i2c_do_action(drv_data);
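
[Editor's aside, not part of the patch: the rework above drops the dedicated OFFLOAD_SEND_START state — mv64xxx_i2c_offload_msg() now returns -EOPNOTSUPP when offloading is disabled or unsuitable, and SEND_START simply tries the offload first and drops back to the programmed-I/O path when it is refused. A self-contained sketch of that try-fast-path-else-fallback shape; all names below are hypothetical:]

#include <errno.h>

struct xfer { int offload_capable; };

/* Stand-in for the hardware offload attempt. */
static int try_offload(struct xfer *x)
{
        return x->offload_capable ? 0 : -EOPNOTSUPP;
}

static void manual_start(struct xfer *x)
{
        (void)x;   /* program the controller registers by hand here */
}

static void start_transfer(struct xfer *x)
{
        if (try_offload(x) < 0)   /* offload refused the message */
                manual_start(x);  /* fall back to the standard path */
}
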
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3bec9220df04..bfec313492b3 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ },
};
-#define BMA180_CHANNEL(_index) { \
+#define BMA180_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
- .indexed = 1, \
- .channel = (_index), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_index = (_index), \
+ .scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
.realbits = 14, \
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}
static const struct iio_chan_spec bma180_channels[] = {
- BMA180_CHANNEL(AXIS_X),
- BMA180_CHANNEL(AXIS_Y),
- BMA180_CHANNEL(AXIS_Z),
- IIO_CHAN_SOFT_TIMESTAMP(4),
+ BMA180_CHANNEL(X),
+ BMA180_CHANNEL(Y),
+ BMA180_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static irqreturn_t bma180_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e283f2f2ee2f..360259266d4f 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client,
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
- vref = devm_regulator_get(&client->dev, "vref");
+ vref = devm_regulator_get_optional(&client->dev, "vref");
if (!IS_ERR(vref)) {
int vref_uv;
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d632386..0916bf6b6c31 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@ enum {
ADIS16300_SCAN_INCLI_X,
ADIS16300_SCAN_INCLI_Y,
ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 368660dfe135..7c582f7ae34e 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
- IIO_CHAN_SOFT_TIMESTAMP(12)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16448_channels[] = {
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = {
},
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16350_channels[] = {
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16300_channels[] = {
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
- IIO_CHAN_SOFT_TIMESTAMP(14)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16334_channels[] = {
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(8)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 3d8110157f2d..94daa9fc1247 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev,
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- if (chan->channel == IIO_MOD_LIGHT_BOTH)
+ if (mask != IIO_CHAN_INFO_CALIBSCALE)
+ return -EINVAL;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
chip->calib0 = calib_from_sysfs(val);
- else
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
chip->calib1 = calib_from_sysfs(val);
+ else
+ return -EINVAL;
return 0;
}
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
- long m)
+ long mask)
{
int ret = -EINVAL;
u32 calib0, calib1;
struct tsl2563_chip *chip = iio_priv(indio_dev);
mutex_lock(&chip->lock);
- switch (m) {
+ switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = chip->data0;
else
*val = chip->data1;
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = calib_to_sysfs(chip->calib0);
else
*val = calib_to_sysfs(chip->calib1);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5afd95..05423543f89d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
#define AK8975_MAX_CONVERSION_TIMEOUT 500
#define AK8975_CONVERSION_DONE_POLL_TIME 10
#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
/*
* Per-instance context data for the device.
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
*
* Since 1uT = 0.01 gauss, our final scale factor becomes:
*
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
*
* Since ASA doesn't change, we cache the resultant scale factor into the
* device context in ak8975_setup().
*/
- data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
- data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
- data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
+ data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
return 0;
}
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
return ak8975_read_axis(indio_dev, chan->address, val);
case IIO_CHAN_INFO_SCALE:
- *val = data->raw_to_gauss[chan->address];
- return IIO_VAL_INT;
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
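
Editor's note on the ak8975 hunks above: RAW_TO_GAUSS() now yields micro-gauss per LSB, and the last hunk reports the scale as an integer-plus-micro pair so the roughly 0.003 G/LSB sensitivity is not lost to integer truncation. A standalone sketch of the arithmetic; the macro is copied from the diff, the sample ASA values are illustrative only:

#include <stdio.h>

#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)

int main(void)
{
	int asa_samples[] = { 0, 128, 255 };	/* sensitivity adjustment samples */
	for (unsigned i = 0; i < sizeof(asa_samples) / sizeof(asa_samples[0]); i++) {
		int micro = RAW_TO_GAUSS(asa_samples[i]);
		/* exposed to user space as "0.<micro>" gauss per LSB */
		printf("ASA=%3d -> scale = 0.%06d G/LSB\n", asa_samples[i], micro);
	}
	return 0;
}
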
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4b65b6d3bdb1..f66955fb3509 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf,
while (n-- > 0)
len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", vals[n][0], vals[n][1]);
+ "%d.%06d ", vals[n][0], vals[n][1]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (chan->type) {
case IIO_MAGN: /* in 0.1 uT / LSB */
ret = mag3110_read(data, buffer);
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
struct mag3110_data *data = iio_priv(indio_dev);
int rate;
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
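
Editor's note on the first mag3110 hunk above: without zero padding, an IIO int-plus-micro pair such as 0 + 62500 prints as "0.62500" instead of "0.062500". A minimal user-space illustration; the values are made up for the example:

#include <stdio.h>

int main(void)
{
	/* An int-plus-micro pair: 0 + 62500 micro means 0.0625 */
	int val = 0, val2 = 62500;

	printf("broken : %d.%d\n", val, val2);	/* prints 0.62500  (wrong) */
	printf("fixed  : %d.%06d\n", val, val2);	/* prints 0.062500 (right) */
	return 0;
}
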
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Initialize network device */
if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
iounmap(mmio_regs);
goto bail4;
}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto bail10;
}
- if (c2_register_device(c2dev))
+ ret = c2_register_device(c2dev);
+ if (ret)
goto bail10;
return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
goto bail4;
/* Initialize the cached adapter limits */
- if (c2_rnic_query(c2dev, &c2dev->props))
+ err = c2_rnic_query(c2dev, &c2dev->props);
+ if (err)
goto bail5;
/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 45126879ad28..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto free_dst;
}
+ neigh_release(neigh);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c2702f549f10..e81c5547e647 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
+{
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
static void update_gids_task(struct work_struct *work)
{
struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
struct mlx4_cmd_mailbox *mailbox;
union ib_gid *gids;
int err;
- int i;
struct mlx4_dev *dev = gw->dev->dev;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
gids = mailbox->buf;
memcpy(gids, gw->gids, sizeof(gw->gids));
- for (i = 1; i < gw->dev->num_ports + 1; i++) {
- if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
- IB_LINK_LAYER_ETHERNET) {
- err = mlx4_cmd(dev, mailbox->dma,
- MLX4_SET_PORT_GID_TABLE << 8 | i,
- 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
- if (err)
- pr_warn(KERN_WARNING
- "set port %d command failed\n", i);
- }
+ if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ pr_warn(KERN_WARNING
+ "set port %d command failed\n", gw->port);
}
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ free:
}
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
- union ib_gid *gid, int clear)
+ union ib_gid *gid, int clear,
+ int default_gid)
{
struct update_gid_work *work;
int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
int found = -1;
int max_gids;
- max_gids = dev->dev->caps.gid_table_len[port];
- for (i = 0; i < max_gids; ++i) {
- if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
- sizeof(*gid)))
- found = i;
-
- if (clear) {
- if (found >= 0) {
- need_update = 1;
- dev->iboe.gid_table[port - 1][found] = zgid;
- break;
- }
- } else {
- if (found >= 0)
- break;
-
- if (free < 0 &&
- !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+ if (default_gid) {
+ free = 0;
+ } else {
+ max_gids = dev->dev->caps.gid_table_len[port];
+ for (i = 1; i < max_gids; ++i) {
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
sizeof(*gid)))
- free = i;
+ found = i;
+
+ if (clear) {
+ if (found >= 0) {
+ need_update = 1;
+ dev->iboe.gid_table[port - 1][found] =
+ zgid;
+ break;
+ }
+ } else {
+ if (found >= 0)
+ break;
+
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i],
+ &zgid, sizeof(*gid)))
+ free = i;
+ }
}
}
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
return 0;
}
-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
- struct update_gid_work *work;
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
+{
+ struct update_gid_work *work;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return -ENOMEM;
- memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+ memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
memset(work->gids, 0, sizeof(work->gids));
INIT_WORK(&work->work, reset_gids_task);
work->dev = dev;
+ work->port = port;
queue_work(wq, &work->work);
return 0;
}
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
rdma_vlan_dev_real_dev(event_netdev) :
event_netdev;
+ union ib_gid default_gid;
+
+ mlx4_make_default_gid(real_dev, &default_gid);
+
+ if (!memcmp(gid, &default_gid, sizeof(*gid)))
+ return 0;
if (event != NETDEV_DOWN && event != NETDEV_UP)
return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
(!netif_is_bond_master(real_dev) &&
(real_dev == iboe->netdevs[port - 1])))
update_gid_table(ibdev, port, gid,
- event == NETDEV_DOWN);
+ event == NETDEV_DOWN, 0);
spin_unlock(&iboe->lock);
return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
rdma_vlan_dev_real_dev(dev) : dev;
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
for (port = 1; port <= MLX4_MAX_PORTS; ++port)
if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
(real_dev == iboe->netdevs[port - 1])))
break;
- spin_unlock(&iboe->lock);
-
if ((port == 0) || (port > MLX4_MAX_PORTS))
return 0;
else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
/*ifa->ifa_address;*/
ipv6_addr_set_v4mapped(ifa->ifa_address,
(struct in6_addr *)&gid);
- update_gid_table(ibdev, port, &gid, 0);
+ update_gid_table(ibdev, port, &gid, 0, 0);
}
endfor_ifa(in_dev);
in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
- update_gid_table(ibdev, port, pgid, 0);
+ update_gid_table(ibdev, port, pgid, 0, 0);
}
read_unlock_bh(&in6_dev->lock);
in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
#endif
}
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev, u8 port)
+{
+ union ib_gid gid;
+ mlx4_make_default_gid(dev, &gid);
+ update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
struct net_device *dev;
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ int i;
- if (reset_gid_table(ibdev))
- return -1;
+ for (i = 1; i <= ibdev->num_ports; ++i)
+ if (reset_gid_table(ibdev, i))
+ return -1;
read_lock(&dev_base_lock);
+ spin_lock(&iboe->lock);
for_each_netdev(&init_net, dev) {
u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
mlx4_ib_get_dev_addr(dev, ibdev, port);
}
+ spin_unlock(&iboe->lock);
read_unlock(&dev_base_lock);
return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
spin_lock(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+ enum ib_port_state port_state = IB_PORT_NOP;
struct net_device *old_master = iboe->masters[port - 1];
+ struct net_device *curr_netdev;
struct net_device *curr_master;
+
iboe->netdevs[port - 1] =
mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+ if (iboe->netdevs[port - 1])
+ mlx4_ib_set_default_gid(ibdev,
+ iboe->netdevs[port - 1], port);
+ curr_netdev = iboe->netdevs[port - 1];
if (iboe->netdevs[port - 1] &&
netif_is_bond_slave(iboe->netdevs[port - 1])) {
- rtnl_lock();
iboe->masters[port - 1] = netdev_master_upper_dev_get(
iboe->netdevs[port - 1]);
- rtnl_unlock();
+ } else {
+ iboe->masters[port - 1] = NULL;
}
curr_master = iboe->masters[port - 1];
+ if (curr_netdev) {
+ port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ } else {
+ reset_gid_table(ibdev, port);
+ }
+ /* if using bonding/team and a slave port is down, we don't want the bond IP
+ * based gids in the table since flows that select port by gid may get
+ * the down port.
+ */
+ if (curr_master && (port_state == IB_PORT_DOWN)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ }
/* if bonding is used it is possible that we add it to masters
- only after IP address is assigned to the net bonding
- interface */
- if (curr_master && (old_master != curr_master))
+ * only after IP address is assigned to the net bonding
+ * interface.
+ */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+ }
+
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
}
spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int i, j;
int err;
struct mlx4_ib_iboe *iboe;
+ int ib_num_ports = 0;
pr_info_once("%s", mlx4_ib_version);
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ ib_num_ports) {
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
}
#endif
+ for (i = 1 ; i <= ibdev->num_ports ; ++i)
+ reset_gid_table(ibdev, i);
+ rtnl_lock();
mlx4_ib_scan_netdevs(ibdev);
+ rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
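
Editor's note on the mlx4 IBoE changes above: mlx4_addrconf_ifid_eui48() builds the low 64 bits of the default link-local GID from the port MAC, inserting either the VLAN id or the usual ff:fe filler between the OUI and NIC halves and flipping the universal/local bit. A hedged, standalone sketch of the same byte layout; the MAC and VLAN values are made up:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Mirrors the byte shuffling done in mlx4_addrconf_ifid_eui48(). */
static void ifid_from_mac(uint8_t eui[8], uint16_t vlan_id, const uint8_t mac[6])
{
	memcpy(eui, mac, 3);		/* OUI half */
	memcpy(eui + 5, mac + 3, 3);	/* NIC half */
	if (vlan_id < 0x1000) {		/* valid VLAN: embed it */
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {			/* no VLAN: standard ff:fe filler */
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;			/* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };	/* example MAC */
	uint8_t eui[8];
	int i;

	ifid_from_mac(eui, 0xffff, mac);	/* 0xffff means "no VLAN" here */
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
	return 0;
}
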
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
- depends on NETDEVICES && ETHERNET && PCI && X86
+ depends on NETDEVICES && ETHERNET && PCI
select NET_VENDOR_MELLANOX
select MLX5_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d093f8cf..aa03e732b6a8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_RC_RNR_NAK_GEN;
flags = dev->mdev.caps.flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_alloc_ucontext_req req;
+ struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
int gross_uuars;
int num_uars;
+ int ver;
int uuarn;
int err;
int i;
+ int reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- err = ib_copy_from_udata(&req, udata, sizeof(req));
+ memset(&req, 0, sizeof(req));
+ reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ ver = 2;
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = ib_copy_from_udata(&req, udata, reqlen);
if (err)
return ERR_PTR(err);
+ if (req.flags || req.reserved)
+ return ERR_PTR(-EINVAL);
+
if (req.total_num_uuars > MLX5_MAX_UUARS)
return ERR_PTR(-ENOMEM);
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_uars;
+ uuari->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars;
uuari->num_uars = num_uars;
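
Editor's note on the mlx5 alloc_ucontext hunk above: the ABI version is inferred from the length of the user payload, and nonzero flags/reserved fields are rejected, which is the usual way to extend a uverbs request struct without breaking old user libraries. A generic, user-space sketch of that length-based versioning pattern; the struct and function names here are illustrative, not the mlx5 ones:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct req_v0 { uint32_t a, b; };
struct req_v2 { uint32_t a, b, flags, reserved; };

/* Pick the ABI version from the payload length, as the mlx5 hunk does. */
static int parse_req(const void *buf, size_t len, struct req_v2 *out)
{
	memset(out, 0, sizeof(*out));	/* fields absent in v0 default to zero */
	if (len == sizeof(struct req_v0) || len == sizeof(struct req_v2))
		memcpy(out, buf, len);
	else
		return -1;		/* unknown ABI */
	if (out->flags || out->reserved)
		return -1;		/* reject bits we do not understand */
	return len == sizeof(struct req_v0) ? 0 : 2;
}

int main(void)
{
	struct req_v0 old = { .a = 1, .b = 2 };
	struct req_v2 parsed;

	printf("version detected: %d\n", parse_req(&old, sizeof(old), &parsed));
	return 0;
}
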
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae37fb9bf262..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
break;
case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
- uuarn = alloc_med_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_med_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
- uuarn = alloc_high_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_high_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ if (init_attr->create_flags)
+ return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 32a2a5dfc523..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
__u32 num_low_latency_uuars;
};
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
- if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+ netdev = nes_netdev_init(nesdev, mmio_regs);
+ if (netdev == NULL) {
+ ret = -ENOMEM;
goto bail7;
+ }
/* Register network device */
ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 2ca86ca818bd..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
if (is_vlan)
- netdev = vlan_dev_real_dev(netdev);
+ netdev = rdma_vlan_dev_real_dev(netdev);
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index aa92f40c9d50..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
OCRDMA_QP_PARAMS_TCLASS_SHIFT;
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 7ecc6061f1f4..f8dfd76be89f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
{
enum usnic_transport_type trans_type = qp_flow->trans_type;
int err;
+ uint16_t port_num = 0;
switch (trans_type) {
case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
case USNIC_TRANSPORT_IPV4_UDP:
err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
NULL, NULL,
- (uint16_t *) id);
+ &port_num);
if (err)
return err;
+ /*
+ * Copy port_num to stack first and then to *id,
+ * so that the short to int cast works for little
+ * and big endian systems.
+ */
+ *id = port_num;
break;
default:
usnic_err("Unsupported transport %u\n", trans_type);
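
Editor's note on the usnic hunk above: copying the port number through a local uint16_t and then assigning it avoids writing through a narrowed pointer, which fills the wrong half of the int on big-endian machines. A tiny host-dependent illustration of the difference; the port number is an example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int id_cast = 0, id_copy = 0;
	uint16_t port = 4791;		/* example port number */

	*(uint16_t *)&id_cast = port;	/* old pattern: endian- and aliasing-unsafe */
	id_copy = port;			/* fixed pattern: well-defined widening */

	/* on little-endian both match; on big-endian the cast result differs */
	printf("via cast: %d, via copy: %d\n", id_cast, id_copy);
	return 0;
}
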
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
+ tx_desc = NULL;
}
atomic_dec(&ib_conn->post_send_buf_count);
- if (tx_desc->type == ISCSI_TX_CONTROL) {
+ if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+ ISER_CONN_TERMINATING)){
+ if (ib_conn->iser_conn)
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
/* Complete the termination process if no posts are pending */
if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2b161be3c1a3..d18d08a076e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
if (ret) {
pr_err("Failed to create fastreg descriptor err=%d\n",
ret);
+ kfree(fr_desc);
goto err;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5a490b..0e537d8d0e47 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RSP_SIZE) {
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_SRQ_SIZE) {
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0c707e4f4eaf..a4c7306ff43d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_SECTORS_USED_SIZE 13
+#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
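
Editor's note on the bcache.h hunk above: MAX_GC_SECTORS_USED derives the saturation limit from the field width instead of the hard-coded (1 << 14) - 1 used later in btree.c, which was one bit too wide for the 13-bit bitfield. A quick standalone check of the arithmetic:

#include <stdio.h>

#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))

int main(void)
{
	/* 13-bit field: maximum value is 2^13 - 1 = 8191 */
	printf("MAX_GC_SECTORS_USED = %llu\n", MAX_GC_SECTORS_USED);
	/* the old clamp, (1 << 14) - 1 = 16383, overflows the field */
	printf("old clamp           = %d\n", (1 << 14) - 1);
	return 0;
}
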
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4f6b5940e609..3f74b4b0747b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k);
- printk(KERN_ERR "block %u key %zi/%u: ", set,
+ printk(KERN_ERR "block %u key %li/%u: ", set,
(uint64_t *) k - i->d, i->keys);
if (b->ops->key_dump)
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
order);
if (!out) {
+ struct page *outp;
+
BUG_ON(order > state->page_order);
- out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+ outp = mempool_alloc(state->pool, GFP_NOIO);
+ out = page_address(outp);
used_mempool = true;
order = state->page_order;
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 98cc0a810a36..5f9c2a665ca5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
GC_SECTORS_USED(g) + KEY_SIZE(k),
- (1 << 14) - 1));
+ MAX_GC_SECTORS_USED));
BUG_ON(!GC_SECTORS_USED(g));
}
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
static size_t insert_u64s_remaining(struct btree *b)
{
- ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
+ long ret = bch_btree_keys_u64s_remaining(&b->keys);
/*
* Might land in the middle of an existing extent and have to split it
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 72cd213f213f..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (op->bypass)
- return bch_data_invalidate(cl);
-
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
wake_up_gc(op->c);
}
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c6ab69333a6d..d8458d477a12 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
return MAP_CONTINUE;
}
-int bch_bset_print_stats(struct cache_set *c, char *buf)
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
struct bset_stats_op op;
int ret;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd3a2a14b587..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
+ int uptodate;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse */
+ /* fixup the bio for reuse, but preserve BIO_UPTODATE */
+ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
bio_reset(b);
+ if (!uptodate)
+ clear_bit(BIO_UPTODATE, &b->bi_flags);
b->bi_vcnt = vcnt;
b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
+ int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
if (sbio->bi_end_io != end_sync_read)
continue;
+ /* Now we can 'fixup' the BIO_UPTODATE flag */
+ set_bit(BIO_UPTODATE, &sbio->bi_flags);
- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ if (uptodate) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+ && uptodate)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feadeb7bb2..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 8f8a6b327cdb..2c2c9cc75231 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+ kfree(dma_map);
return rc;
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1ee2b9492a82..9b809cfc2899 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -908,7 +908,6 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
- cl->read_cb = NULL;
cl->timer_count = 0;
}
}
@@ -942,8 +941,16 @@ void mei_cl_all_wakeup(struct mei_device *dev)
void mei_cl_all_write_clear(struct mei_device *dev)
{
struct mei_cl_cb *cb, *next;
+ struct list_head *list;
- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list = &dev->write_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
+
+ list = &dev->write_waiting_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 752ff873f891..7e1ef0ebbb80 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
static int _mic_virtio_copy(struct mic_vdev *mvdev,
struct mic_copy_desc *copy)
{
- int ret = 0, iovcnt = copy->iovcnt;
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
struct iovec iov;
struct iovec __user *u_iov = copy->iov;
void __user *ubuf = NULL;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc54fe4b..3e049c13429c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f342278539d5..494b888a6568 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -139,7 +139,7 @@ config MACVTAP
This adds a specialized tap character device driver that is based
on the MAC-VLAN network interface, called macvtap. A macvtap device
can be added in the same way as a macvlan device, using 'type
- macvlan', and then be accessed through the tap user space interface.
+ macvtap', and then be accessed through the tap user space interface.
To compile this driver as a module, choose M here: the module
will be called macvtap.
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cce1f1bf90b4..f04f3625e944 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -768,11 +768,11 @@ static int ad_lacpdu_send(struct port *port)
lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
- memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback lacpdus in receive.
*/
- memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
lacpdu_header->lacpdu = port->lacpdu;
@@ -810,11 +810,11 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
marker_header = (struct bond_marker_header *)skb_put(skb, length);
- memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback MARKERs in receive.
*/
- memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
marker_header->marker = *marker;
@@ -1079,7 +1079,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* detect loopback situation */
if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
&(port->actor_system))) {
- pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+ pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
+ "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->bond->dev->name,
port->slave->dev->name);
return;
@@ -1796,8 +1797,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
BOND_AD_INFO(bond).agg_select_timer = timeout;
}
-static u16 aggregator_identifier;
-
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
@@ -1811,7 +1810,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
bond->dev->dev_addr)) {
- aggregator_identifier = 0;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1880,7 +1879,7 @@ void bond_3ad_bind_slave(struct slave *slave)
ad_initialize_agg(aggregator);
aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
- aggregator->aggregator_identifier = (++aggregator_identifier);
+ aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
aggregator->slave = slave;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
@@ -1950,7 +1949,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
* new aggregator
*/
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
- pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
+ pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
aggregator->aggregator_identifier,
new_aggregator->aggregator_identifier);
@@ -2300,9 +2299,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- pr_debug("Port %d changed link status to %s",
- port->actor_port_number,
- (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ pr_debug("Port %d changed link status to %s\n",
+ port->actor_port_number,
+ link == BOND_LINK_UP ? "UP" : "DOWN");
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
@@ -2380,17 +2379,16 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
}
}
- if (aggregator) {
- ad_info->aggregator_id = aggregator->aggregator_identifier;
- ad_info->ports = aggregator->num_of_ports;
- ad_info->actor_key = aggregator->actor_oper_aggregator_key;
- ad_info->partner_key = aggregator->partner_oper_aggregator_key;
- memcpy(ad_info->partner_system,
- aggregator->partner_system.mac_addr_value, ETH_ALEN);
- return 0;
- }
+ if (!aggregator)
+ return -1;
- return -1;
+ ad_info->aggregator_id = aggregator->aggregator_identifier;
+ ad_info->ports = aggregator->num_of_ports;
+ ad_info->actor_key = aggregator->actor_oper_aggregator_key;
+ ad_info->partner_key = aggregator->partner_oper_aggregator_key;
+ ether_addr_copy(ad_info->partner_system,
+ aggregator->partner_system.mac_addr_value);
+ return 0;
}
/* Wrapper used to hold bond->lock so no slave manipulation can occur */
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 13dc9d3c5e34..bb03b1df2f3e 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -28,7 +28,7 @@
#include <linux/netdevice.h>
#include <linux/if_ether.h>
-// General definitions
+/* General definitions */
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
@@ -47,54 +47,54 @@ enum {
BOND_AD_COUNT = 2,
};
-// rx machine states(43.4.11 in the 802.3ad standard)
+/* rx machine states(43.4.11 in the 802.3ad standard) */
typedef enum {
AD_RX_DUMMY,
- AD_RX_INITIALIZE, // rx Machine
- AD_RX_PORT_DISABLED, // rx Machine
- AD_RX_LACP_DISABLED, // rx Machine
- AD_RX_EXPIRED, // rx Machine
- AD_RX_DEFAULTED, // rx Machine
- AD_RX_CURRENT // rx Machine
+ AD_RX_INITIALIZE, /* rx Machine */
+ AD_RX_PORT_DISABLED, /* rx Machine */
+ AD_RX_LACP_DISABLED, /* rx Machine */
+ AD_RX_EXPIRED, /* rx Machine */
+ AD_RX_DEFAULTED, /* rx Machine */
+ AD_RX_CURRENT /* rx Machine */
} rx_states_t;
-// periodic machine states(43.4.12 in the 802.3ad standard)
+/* periodic machine states(43.4.12 in the 802.3ad standard) */
typedef enum {
AD_PERIODIC_DUMMY,
- AD_NO_PERIODIC, // periodic machine
- AD_FAST_PERIODIC, // periodic machine
- AD_SLOW_PERIODIC, // periodic machine
- AD_PERIODIC_TX // periodic machine
+ AD_NO_PERIODIC, /* periodic machine */
+ AD_FAST_PERIODIC, /* periodic machine */
+ AD_SLOW_PERIODIC, /* periodic machine */
+ AD_PERIODIC_TX /* periodic machine */
} periodic_states_t;
-// mux machine states(43.4.13 in the 802.3ad standard)
+/* mux machine states(43.4.13 in the 802.3ad standard) */
typedef enum {
AD_MUX_DUMMY,
- AD_MUX_DETACHED, // mux machine
- AD_MUX_WAITING, // mux machine
- AD_MUX_ATTACHED, // mux machine
- AD_MUX_COLLECTING_DISTRIBUTING // mux machine
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
-// tx machine states(43.4.15 in the 802.3ad standard)
+/* tx machine states(43.4.15 in the 802.3ad standard) */
typedef enum {
AD_TX_DUMMY,
- AD_TRANSMIT // tx Machine
+ AD_TRANSMIT /* tx Machine */
} tx_states_t;
-// rx indication types
+/* rx indication types */
typedef enum {
- AD_TYPE_LACPDU = 1, // type lacpdu
- AD_TYPE_MARKER // type marker
+ AD_TYPE_LACPDU = 1, /* type lacpdu */
+ AD_TYPE_MARKER /* type marker */
} pdu_type_t;
-// rx marker indication types
+/* rx marker indication types */
typedef enum {
- AD_MARKER_INFORMATION_SUBTYPE = 1, // marker imformation subtype
- AD_MARKER_RESPONSE_SUBTYPE // marker response subtype
+ AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker information subtype */
+ AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
} bond_marker_subtype_t;
-// timers types(43.4.9 in the 802.3ad standard)
+/* timers types(43.4.9 in the 802.3ad standard) */
typedef enum {
AD_CURRENT_WHILE_TIMER,
AD_ACTOR_CHURN_TIMER,
@@ -105,35 +105,35 @@ typedef enum {
#pragma pack(1)
-// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard)
+/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
typedef struct lacpdu {
- u8 subtype; // = LACP(= 0x01)
+ u8 subtype; /* = LACP(= 0x01) */
u8 version_number;
- u8 tlv_type_actor_info; // = actor information(type/length/value)
- u8 actor_information_length; // = 20
+ u8 tlv_type_actor_info; /* = actor information(type/length/value) */
+ u8 actor_information_length; /* = 20 */
__be16 actor_system_priority;
struct mac_addr actor_system;
__be16 actor_key;
__be16 actor_port_priority;
__be16 actor_port;
u8 actor_state;
- u8 reserved_3_1[3]; // = 0
- u8 tlv_type_partner_info; // = partner information
- u8 partner_information_length; // = 20
+ u8 reserved_3_1[3]; /* = 0 */
+ u8 tlv_type_partner_info; /* = partner information */
+ u8 partner_information_length; /* = 20 */
__be16 partner_system_priority;
struct mac_addr partner_system;
__be16 partner_key;
__be16 partner_port_priority;
__be16 partner_port;
u8 partner_state;
- u8 reserved_3_2[3]; // = 0
- u8 tlv_type_collector_info; // = collector information
- u8 collector_information_length; // = 16
+ u8 reserved_3_2[3]; /* = 0 */
+ u8 tlv_type_collector_info; /* = collector information */
+ u8 collector_information_length;/* = 16 */
__be16 collector_max_delay;
u8 reserved_12[12];
- u8 tlv_type_terminator; // = terminator
- u8 terminator_length; // = 0
- u8 reserved_50[50]; // = 0
+ u8 tlv_type_terminator; /* = terminator */
+ u8 terminator_length; /* = 0 */
+ u8 reserved_50[50]; /* = 0 */
} __packed lacpdu_t;
typedef struct lacpdu_header {
@@ -141,20 +141,20 @@ typedef struct lacpdu_header {
struct lacpdu lacpdu;
} __packed lacpdu_header_t;
-// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
+/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
typedef struct bond_marker {
- u8 subtype; // = 0x02 (marker PDU)
- u8 version_number; // = 0x01
- u8 tlv_type; // = 0x01 (marker information)
- // = 0x02 (marker response information)
- u8 marker_length; // = 0x16
- u16 requester_port; // The number assigned to the port by the requester
- struct mac_addr requester_system; // The requester's system id
- u32 requester_transaction_id; // The transaction id allocated by the requester,
- u16 pad; // = 0
- u8 tlv_type_terminator; // = 0x00
- u8 terminator_length; // = 0x00
- u8 reserved_90[90]; // = 0
+ u8 subtype; /* = 0x02 (marker PDU) */
+ u8 version_number; /* = 0x01 */
+ u8 tlv_type; /* = 0x01 (marker information) */
+ /* = 0x02 (marker response information) */
+ u8 marker_length; /* = 0x16 */
+ u16 requester_port; /* The number assigned to the port by the requester */
+ struct mac_addr requester_system; /* The requester's system id */
+ u32 requester_transaction_id; /* The transaction id allocated by the requester, */
+ u16 pad; /* = 0 */
+ u8 tlv_type_terminator; /* = 0x00 */
+ u8 terminator_length; /* = 0x00 */
+ u8 reserved_90[90]; /* = 0 */
} __packed bond_marker_t;
typedef struct bond_marker_header {
@@ -173,7 +173,7 @@ struct port;
#pragma pack(8)
#endif
-// aggregator structure(43.4.5 in the 802.3ad standard)
+/* aggregator structure(43.4.5 in the 802.3ad standard) */
typedef struct aggregator {
struct mac_addr aggregator_mac_address;
u16 aggregator_identifier;
@@ -183,12 +183,12 @@ typedef struct aggregator {
struct mac_addr partner_system;
u16 partner_system_priority;
u16 partner_oper_aggregator_key;
- u16 receive_state; // BOOLEAN
- u16 transmit_state; // BOOLEAN
+ u16 receive_state; /* BOOLEAN */
+ u16 transmit_state; /* BOOLEAN */
struct port *lag_ports;
- // ****** PRIVATE PARAMETERS ******
- struct slave *slave; // pointer to the bond slave that this aggregator belongs to
- u16 is_active; // BOOLEAN. Indicates if this aggregator is active
+ /* ****** PRIVATE PARAMETERS ****** */
+ struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
+ u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
u16 num_of_ports;
} aggregator_t;
@@ -201,12 +201,12 @@ struct port_params {
u16 port_state;
};
-// port structure(43.4.6 in the 802.3ad standard)
+/* port structure(43.4.6 in the 802.3ad standard) */
typedef struct port {
u16 actor_port_number;
u16 actor_port_priority;
- struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification
- u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification
+ struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
u16 actor_port_aggregator_identifier;
bool ntt;
u16 actor_admin_port_key;
@@ -219,24 +219,24 @@ typedef struct port {
bool is_enabled;
- // ****** PRIVATE PARAMETERS ******
- u16 sm_vars; // all state machines variables for this port
- rx_states_t sm_rx_state; // state machine rx state
- u16 sm_rx_timer_counter; // state machine rx timer counter
- periodic_states_t sm_periodic_state;// state machine periodic state
- u16 sm_periodic_timer_counter; // state machine periodic timer counter
- mux_states_t sm_mux_state; // state machine mux state
- u16 sm_mux_timer_counter; // state machine mux timer counter
- tx_states_t sm_tx_state; // state machine tx state
- u16 sm_tx_timer_counter; // state machine tx timer counter(allways on - enter to transmit state 3 time per second)
- struct slave *slave; // pointer to the bond slave that this port belongs to
- struct aggregator *aggregator; // pointer to an aggregator that this port related to
- struct port *next_port_in_aggregator; // Next port on the linked list of the parent aggregator
- u32 transaction_id; // continuous number for identification of Marker PDU's;
- struct lacpdu lacpdu; // the lacpdu that will be sent for this port
+ /* ****** PRIVATE PARAMETERS ****** */
+ u16 sm_vars; /* all state machines variables for this port */
+ rx_states_t sm_rx_state; /* state machine rx state */
+ u16 sm_rx_timer_counter; /* state machine rx timer counter */
+ periodic_states_t sm_periodic_state; /* state machine periodic state */
+ u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
+ mux_states_t sm_mux_state; /* state machine mux state */
+ u16 sm_mux_timer_counter; /* state machine mux timer counter */
+ tx_states_t sm_tx_state; /* state machine tx state */
+ u16 sm_tx_timer_counter; /* state machine tx timer counter (always on - enters the transmit state 3 times per second) */
+ struct slave *slave; /* pointer to the bond slave that this port belongs to */
+ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
+ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
+ u32 transaction_id; /* continuous number for identification of Marker PDU's; */
+ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
} port_t;
-// system structure
+/* system structure */
struct ad_system {
u16 sys_priority;
struct mac_addr sys_mac_addr;
@@ -246,26 +246,26 @@ struct ad_system {
#pragma pack()
#endif
-// ================= AD Exported structures to the main bonding code ==================
+/* ========== AD Exported structures to the main bonding code ========== */
#define BOND_AD_INFO(bond) ((bond)->ad_info)
#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
struct ad_bond_info {
- struct ad_system system; /* 802.3ad system structure */
- u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ struct ad_system system; /* 802.3ad system structure */
+ u32 agg_select_timer; /* Timer to select aggregator after all adapters' handshakes */
+ u16 aggregator_identifier;
};
struct ad_slave_info {
- struct aggregator aggregator; // 802.3ad aggregator structure
- struct port port; // 802.3ad port structure
- spinlock_t state_machine_lock; /* mutex state machines vs.
- incoming LACPDU */
+ struct aggregator aggregator; /* 802.3ad aggregator structure */
+ struct port port; /* 802.3ad port structure */
+ spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */
u16 id;
};
-// ================= AD Exported functions to the main bonding code ==================
+/* ========== AD Exported functions to the main bonding code ========== */
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-void bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
@@ -280,5 +280,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
-#endif //__BOND_3AD_H__
+#endif /* __BOND_3AD_H__ */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index a2c47476804d..aaeeacf767f2 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -93,9 +93,8 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
int i;
u8 hash = 0;
- for (i = 0; i < hash_size; i++) {
+ for (i = 0; i < hash_size; i++)
hash ^= hash_start[i];
- }
return hash;
}
@@ -190,9 +189,8 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
- for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
- }
_unlock_tx_hashtbl_bh(bond);
@@ -264,9 +262,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
hash_table[hash_index].next = next_index;
hash_table[hash_index].prev = TLB_NULL_INDEX;
- if (next_index != TLB_NULL_INDEX) {
+ if (next_index != TLB_NULL_INDEX)
hash_table[next_index].prev = hash_index;
- }
slave_info->head = hash_index;
slave_info->load +=
@@ -274,9 +271,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
}
}
- if (assigned_slave) {
+ if (assigned_slave)
hash_table[hash_index].tx_bytes += skb_len;
- }
return assigned_slave;
}
@@ -329,7 +325,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_lock_rx_hashtbl_bh(bond);
- hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->assigned) &&
@@ -337,7 +333,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
(client_info->ip_dst == arp->ip_src) &&
(!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
/* update the clients MAC address */
- memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_src);
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
@@ -451,9 +447,8 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return;
- }
if (!bond->alb_info.primary_is_promisc) {
if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
@@ -513,9 +508,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
write_lock_bh(&bond->curr_slave_lock);
- if (slave != bond->curr_active_slave) {
+ if (slave != bond->curr_active_slave)
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
- }
write_unlock_bh(&bond->curr_slave_lock);
}
@@ -524,9 +518,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave) {
+ if (!client_info->slave)
return;
- }
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
struct sk_buff *skb;
@@ -574,9 +567,8 @@ static void rlb_update_rx_clients(struct bonding *bond)
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->ntt) {
rlb_update_client(client_info);
- if (bond_info->rlb_update_retry_counter == 0) {
+ if (bond_info->rlb_update_retry_counter == 0)
client_info->ntt = 0;
- }
}
}
@@ -610,10 +602,10 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
}
}
- // update the team's flag only after the whole iteration
+ /* update the team's flag only after the whole iteration */
if (ntt) {
bond_info->rx_ntt = 1;
- //fasten the change
+ /* hasten the change */
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
@@ -677,9 +669,9 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
/* the entry is already assigned to this client */
if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
/* update mac address from arp */
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
}
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
assigned_slave = client_info->slave;
if (assigned_slave) {
@@ -719,8 +711,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* will be updated with clients actual unicast mac address
* upon receiving an arp reply.
*/
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
client_info->slave = assigned_slave;
if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -770,9 +762,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* rx channel
*/
tx_slave = rlb_choose_channel(skb, bond);
- if (tx_slave) {
- memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
- }
+ if (tx_slave)
+ ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
pr_debug("Server sent ARP Reply packet\n");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
@@ -824,9 +815,8 @@ static void rlb_rebalance(struct bonding *bond)
}
/* update the team's flag only after the whole iteration */
- if (ntt) {
+ if (ntt)
bond_info->rx_ntt = 1;
- }
_unlock_rx_hashtbl_bh(bond);
}
@@ -923,7 +913,7 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
u32 index;
_lock_rx_hashtbl_bh(bond);
@@ -957,9 +947,8 @@ static int rlb_initialize(struct bonding *bond)
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
- for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
- }
_unlock_rx_hashtbl_bh(bond);
@@ -1014,8 +1003,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
char *data;
memset(&pkt, 0, size);
- memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
- memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
+ ether_addr_copy(pkt.mac_dst, mac_addr);
+ ether_addr_copy(pkt.mac_src, mac_addr);
pkt.type = cpu_to_be16(ETH_P_LOOP);
skb = dev_alloc_skb(size);
@@ -1097,7 +1086,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
u8 tmp_mac_addr[ETH_ALEN];
- memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
alb_set_slave_mac_addr(slave2, tmp_mac_addr);
@@ -1254,9 +1243,9 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
if (free_mac_slave) {
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
- pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
- bond->dev->name, slave->dev->name,
- free_mac_slave->dev->name);
+ pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+ bond->dev->name, slave->dev->name,
+ free_mac_slave->dev->name);
} else if (has_bond_addr) {
pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
@@ -1294,12 +1283,12 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
- memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, slave->dev->dev_addr);
res = dev_set_mac_address(slave->dev, addr);
/* restore net_device's hw address */
- memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(slave->dev->dev_addr, tmp_addr);
if (res)
goto unwind;
@@ -1315,9 +1304,9 @@ unwind:
bond_for_each_slave(bond, rollback_slave, iter) {
if (rollback_slave == slave)
break;
- memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
dev_set_mac_address(rollback_slave->dev, &sa);
- memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
}
return res;
@@ -1330,9 +1319,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
int res;
res = tlb_initialize(bond);
- if (res) {
+ if (res)
return res;
- }
if (rlb_enabled) {
bond->alb_info.rlb_enabled = 1;
@@ -1355,9 +1343,8 @@ void bond_alb_deinitialize(struct bonding *bond)
tlb_deinitialize(bond);
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
rlb_deinitialize(bond);
- }
}
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
@@ -1436,14 +1423,13 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- hash_start = (char*)eth_data->h_dest;
+ hash_start = (char *)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
do_tx_balance = 0;
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
- }
break;
default:
do_tx_balance = 0;
@@ -1463,19 +1449,18 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- memcpy(eth_data->h_source,
- tx_slave->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
}
bond_dev_queue_xmit(bond, skb, tx_slave->dev);
goto out;
- } else {
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
+ }
+
+ if (tx_slave) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
}
/* no suitable interface, frame not sent */
@@ -1577,11 +1562,10 @@ void bond_alb_monitor(struct work_struct *work)
--bond_info->rlb_update_delay_counter;
} else {
rlb_update_rx_clients(bond);
- if (bond_info->rlb_update_retry_counter) {
+ if (bond_info->rlb_update_retry_counter)
--bond_info->rlb_update_retry_counter;
- } else {
+ else
bond_info->rx_ntt = 0;
- }
}
}
}
@@ -1598,23 +1582,20 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
int res;
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
- if (res) {
+ if (res)
return res;
- }
res = alb_handle_addr_collision_on_attach(bond, slave);
- if (res) {
+ if (res)
return res;
- }
tlb_init_slave(slave);
/* order a rebalance ASAP */
bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
- }
return 0;
}
@@ -1645,9 +1626,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
if (link == BOND_LINK_DOWN) {
tlb_clear_slave(bond, slave, 0);
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_slave(bond, slave);
- }
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
@@ -1723,14 +1703,14 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
struct sockaddr sa;
u8 tmp_addr[ETH_ALEN];
- memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
sa.sa_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
dev_set_mac_address(new_slave->dev, &sa);
- memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1759,14 +1739,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
struct slave *swap_slave;
int res;
- if (!is_valid_ether_addr(sa->sa_data)) {
+ if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- }
res = alb_set_mac_address(bond, addr);
- if (res) {
+ if (res)
return res;
- }
memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
@@ -1774,9 +1752,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
* Otherwise we'll need to pass the new address to it and handle
* duplications.
*/
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return 0;
- }
swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
@@ -1800,8 +1777,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_vlan(bond, vlan_id);
- }
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 5fc4c2351478..2d3f7fa541ff 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -69,7 +69,7 @@ void bond_debug_register(struct bonding *bond)
debugfs_create_dir(bond->dev->name, bonding_debug_root);
if (!bond->debug_dir) {
- pr_warning("%s: Warning: failed to register to debugfs\n",
+ pr_warn("%s: Warning: failed to register to debugfs\n",
bond->dev->name);
return;
}
@@ -98,9 +98,8 @@ void bond_debug_reregister(struct bonding *bond)
if (d) {
bond->debug_dir = d;
} else {
- pr_warning("%s: Warning: failed to reregister, "
- "so just unregister old one\n",
- bond->dev->name);
+ pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
+ bond->dev->name);
bond_debug_unregister(bond);
}
}
@@ -110,8 +109,7 @@ void bond_create_debugfs(void)
bonding_debug_root = debugfs_create_dir("bonding", NULL);
if (!bonding_debug_root) {
- pr_warning("Warning: Cannot create bonding directory"
- " in debugfs\n");
+ pr_warn("Warning: Cannot create bonding directory in debugfs\n");
}
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 71ba18efa15b..bd70bbc7992c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -673,12 +673,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
write_unlock_bh(&bond->curr_slave_lock);
if (old_active) {
- memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
- memcpy(saddr.sa_data, old_active->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+ ether_addr_copy(saddr.sa_data,
+ old_active->dev->dev_addr);
saddr.sa_family = new_active->dev->type;
} else {
- memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
saddr.sa_family = bond->dev->type;
}
@@ -692,7 +692,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!old_active)
goto out;
- memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, tmp_mac);
saddr.sa_family = old_active->dev->type;
rv = dev_set_mac_address(old_active->dev, &saddr);
@@ -798,11 +798,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
if (new_active) {
- new_active->jiffies = jiffies;
+ new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one %d ms earlier.\n",
+ pr_info("%s: making interface %s the new active one %d ms earlier\n",
bond->dev->name, new_active->dev->name,
(bond->params.updelay - new_active->delay) * bond->params.miimon);
}
@@ -817,7 +817,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one.\n",
+ pr_info("%s: making interface %s the new active one\n",
bond->dev->name, new_active->dev->name);
}
}
@@ -906,7 +906,7 @@ void bond_select_active_slave(struct bonding *bond)
pr_info("%s: first active interface up!\n",
bond->dev->name);
} else {
- pr_info("%s: now running without any active interface !\n",
+ pr_info("%s: now running without any active interface!\n",
bond->dev->name);
}
}
@@ -1115,9 +1115,6 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- if (bond->params.arp_interval)
- slave->dev->last_rx = jiffies;
-
recv_probe = ACCESS_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
@@ -1142,7 +1139,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
- memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
}
return ret;
@@ -1183,13 +1180,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- pr_warning("%s: Warning: no link monitoring support for %s\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: no link monitoring support for %s\n",
+ bond_dev->name, slave_dev->name);
}
/* already enslaved */
if (slave_dev->flags & IFF_SLAVE) {
- pr_debug("Error, Device was already enslaved\n");
+ pr_debug("Error: Device was already enslaved\n");
return -EBUSY;
}
@@ -1202,9 +1199,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name, slave_dev->name, bond_dev->name);
return -EPERM;
} else {
- pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
- bond_dev->name, slave_dev->name,
- slave_dev->name, bond_dev->name);
+ pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+ bond_dev->name, slave_dev->name,
+ slave_dev->name, bond_dev->name);
}
} else {
pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
@@ -1217,7 +1214,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* enslaving it; the old ifenslave will not.
*/
if ((slave_dev->flags & IFF_UP)) {
- pr_err("%s is up. This may be due to an out of date ifenslave.\n",
+ pr_err("%s is up - this may be due to an out of date ifenslave\n",
slave_dev->name);
res = -EPERM;
goto err_undo_flags;
@@ -1261,24 +1258,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
- pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
- slave_dev->name,
- slave_dev->type, bond_dev->type);
+ pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
+ slave_dev->name, slave_dev->type, bond_dev->type);
res = -EINVAL;
goto err_undo_flags;
}
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
bond_dev->name);
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
- pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
bond_dev->name);
}
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
- pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
+ pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
bond_dev->name);
res = -EOPNOTSUPP;
goto err_undo_flags;
@@ -1317,7 +1313,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* that need it, and for restoring it upon release, and then
* set it to the master's address
*/
- memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
if (!bond->params.fail_over_mac ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
@@ -1401,10 +1397,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_update_speed_duplex(new_slave);
- new_slave->last_arp_rx = jiffies -
+ new_slave->last_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
- new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
+ new_slave->target_last_arp_rx[i] = new_slave->last_rx;
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1419,12 +1415,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
+ bond_dev->name, slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
+ bond_dev->name, slave_dev->name);
}
}
@@ -1448,10 +1444,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (new_slave->link != BOND_LINK_DOWN)
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
- new_slave->link == BOND_LINK_DOWN ? "DOWN" :
- (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
@@ -1510,9 +1506,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- pr_info("Error, %s: master_dev is using netpoll, "
- "but new slave device does not support netpoll.\n",
- bond_dev->name);
+ pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
+ bond_dev->name);
res = -EBUSY;
goto err_detach;
}
@@ -1543,15 +1538,17 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
if (USES_PRIMARY(bond->params.mode)) {
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
}
- pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
+ pr_info("%s: Enslaving %s as %s interface with %s link\n",
bond_dev->name, slave_dev->name,
- bond_is_active_slave(new_slave) ? "n active" : " backup",
- new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+ bond_is_active_slave(new_slave) ? "an active" : "a backup",
+ new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
return 0;
@@ -1571,10 +1568,12 @@ err_detach:
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
}
slave_disable_netpoll(new_slave);
@@ -1589,7 +1588,7 @@ err_restore_mac:
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
*/
- memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1634,7 +1633,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- pr_err("%s: Error: cannot release %s.\n",
+ pr_err("%s: Error: cannot release %s\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
}
@@ -1668,7 +1667,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_unlock_bh(&bond->lock);
- pr_info("%s: releasing %s interface %s\n",
+ pr_info("%s: Releasing %s interface %s\n",
bond_dev->name,
bond_is_active_slave(slave) ? "active" : "backup",
slave_dev->name);
@@ -1681,10 +1680,10 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
- pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
}
if (bond->primary_slave == slave)
@@ -1725,10 +1724,10 @@ static int __bond_release_one(struct net_device *bond_dev,
eth_hw_addr_random(bond_dev);
if (vlan_uses_dev(bond_dev)) {
- pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
+ bond_dev->name, bond_dev->name);
+ pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
+ bond_dev->name);
}
}
@@ -1743,7 +1742,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
- pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
bond_dev->name, slave_dev->name, bond_dev->name);
/* must do this from outside any spinlocks */
@@ -1778,7 +1777,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1811,7 +1810,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- pr_info("%s: destroying bond %s.\n",
+ pr_info("%s: Destroying bond %s\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
}
@@ -1880,7 +1879,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->link = BOND_LINK_FAIL;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
+ pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
bond->dev->name,
(bond->params.mode ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -1896,8 +1895,8 @@ static int bond_miimon_inspect(struct bonding *bond)
* recovered before downdelay expired
*/
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
- pr_info("%s: link status up again after %d ms for interface %s.\n",
+ slave->last_link_up = jiffies;
+ pr_info("%s: link status up again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.downdelay - slave->delay) *
bond->params.miimon,
@@ -1922,7 +1921,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
+ pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
bond->dev->name, slave->dev->name,
ignore_updelay ? 0 :
bond->params.updelay *
@@ -1932,7 +1931,7 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
- pr_info("%s: link status down again after %d ms for interface %s.\n",
+ pr_info("%s: link status down again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.updelay - slave->delay) *
bond->params.miimon,
@@ -1971,7 +1970,7 @@ static void bond_miimon_commit(struct bonding *bond)
case BOND_LINK_UP:
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
+ slave->last_link_up = jiffies;
if (bond->params.mode == BOND_MODE_8023AD) {
/* prevent it from being the active one */
@@ -1984,7 +1983,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_set_backup_slave(slave);
}
- pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
bond->dev->name, slave->dev->name,
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2132,8 +2131,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
{
struct sk_buff *skb;
- pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
- slave_dev->name, &dest_ip, &src_ip, vlan_id);
+ pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n",
+ arp_op, slave_dev->name, &dest_ip, &src_ip, vlan_id);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
@@ -2247,7 +2246,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
pr_debug("bva: sip %pI4 not found in targets\n", &sip);
return;
}
- slave->last_arp_rx = jiffies;
+ slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
}
@@ -2255,17 +2254,19 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
+ struct slave *curr_active_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int alen;
+ int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
- if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
+ !slave_do_arp_validate_only(bond, slave))
+ slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
-
- read_lock(&bond->lock);
-
- if (!slave_do_arp_validate(bond, slave))
- goto out_unlock;
+ } else if (!is_arp) {
+ return RX_HANDLER_ANOTHER;
+ }
alen = arp_hdr_len(bond->dev);
@@ -2299,6 +2300,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
&sip, &tip);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
/*
* Backup slaves won't see the ARP reply, but do come through
* here for each ARP probe (so we swap the sip/tip to validate
@@ -2312,15 +2315,15 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
* is done to avoid endless looping when we can't reach the
* arp_ip_target and fool ourselves with our own arp requests.
*/
+
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
- else if (bond->curr_active_slave &&
- time_after(slave_last_rx(bond, bond->curr_active_slave),
- bond->curr_active_slave->jiffies))
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
out_unlock:
- read_unlock(&bond->lock);
if (arp != (struct arphdr *)skb->data)
kfree(arp);
return RX_HANDLER_ANOTHER;
@@ -2363,9 +2366,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
- * the picture unless it is null. also, slave->jiffies is not needed
- * here because we send an arp on each slave and give a slave as
- * long as it needs to get the tx/rx within the delta.
+ * the picture unless it is null. also, slave->last_link_up is not
+ * needed here because we send an arp on each slave and give a slave
+ * as long as it needs to get the tx/rx within the delta.
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
@@ -2374,7 +2377,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
- bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
+ bond_time_in_interval(bond, slave->last_rx, 1)) {
slave->link = BOND_LINK_UP;
slave_state_changed = 1;
@@ -2385,7 +2388,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* is closed.
*/
if (!oldcurrent) {
- pr_info("%s: link status definitely up for interface %s, ",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name,
slave->dev->name);
do_failover = 1;
@@ -2403,7 +2406,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* if we don't know our ip yet
*/
if (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
+ !bond_time_in_interval(bond, slave->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
slave_state_changed = 1;
@@ -2411,9 +2414,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- pr_info("%s: interface %s is now down.\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now down\n",
+ bond->dev->name, slave->dev->name);
if (slave == oldcurrent)
do_failover = 1;
@@ -2492,7 +2494,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (bond_time_in_interval(bond, slave->jiffies, 2))
+ if (bond_time_in_interval(bond, slave->last_link_up, 2))
continue;
/*
@@ -2562,7 +2564,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
bond->current_arp_slave = NULL;
}
- pr_info("%s: link status definitely up for interface %s.\n",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name, slave->dev->name);
if (!bond->curr_active_slave ||
@@ -2675,7 +2677,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave);
- pr_info("%s: backup interface %s is now down.\n",
+ pr_info("%s: backup interface %s is now down\n",
bond->dev->name, slave->dev->name);
}
if (slave == curr_arp_slave)
@@ -2693,7 +2695,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
new_slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(new_slave);
bond_arp_send_all(bond, new_slave);
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
rtnl_unlock();
@@ -2861,12 +2863,15 @@ static int bond_slave_netdev_event(unsigned long event,
break;
}
- pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
- bond->dev->name, bond->primary_slave ? slave_dev->name :
- "none");
+ pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
+ bond->dev->name,
+ bond->primary_slave ? slave_dev->name : "none");
+
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
@@ -2896,8 +2901,7 @@ static int bond_netdev_event(struct notifier_block *this,
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
pr_debug("event_dev: %s, event: %lx\n",
- event_dev ? event_dev->name : "None",
- event);
+ event_dev ? event_dev->name : "None", event);
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@ -3064,8 +3068,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
}
if (bond->params.mode == BOND_MODE_8023AD) {
@@ -3352,8 +3355,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
struct list_head *iter;
int res = 0;
- pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
- (bond_dev ? bond_dev->name : "None"), new_mtu);
+ pr_debug("bond=%p, name=%s, new_mtu=%d\n",
+ bond, bond_dev ? bond_dev->name : "None", new_mtu);
/* Can't hold bond->lock with bh disabled here since
* some base drivers panic. On the other hand we can't
@@ -3372,8 +3375,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
bond_for_each_slave(bond, slave, iter) {
pr_debug("s %p c_m %p\n",
- slave,
- slave->dev->netdev_ops->ndo_change_mtu);
+ slave, slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3461,15 +3463,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
*/
bond_for_each_slave(bond, slave, iter) {
- const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
-
- if (slave_ops->ndo_set_mac_address == NULL) {
- res = -EOPNOTSUPP;
- pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
- goto unwind;
- }
-
res = dev_set_mac_address(slave->dev, addr);
if (res) {
/* TODO: consider downing the slave
@@ -3700,7 +3694,7 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -3935,7 +3929,7 @@ static void bond_uninit(struct net_device *bond_dev)
/* Release the bonded slaves */
bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true);
- pr_info("%s: released all slaves\n", bond_dev->name);
+ pr_info("%s: Released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
@@ -4013,7 +4007,7 @@ static int bond_check_params(struct bond_params *params)
if ((bond_mode != BOND_MODE_XOR) &&
(bond_mode != BOND_MODE_8023AD)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
- bond_mode_name(bond_mode));
+ bond_mode_name(bond_mode));
} else {
bond_opt_initstr(&newval, xmit_hash_policy);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
@@ -4054,74 +4048,71 @@ static int bond_check_params(struct bond_params *params)
}
params->ad_select = valptr->value;
if (bond_mode != BOND_MODE_8023AD)
- pr_warning("ad_select param only affects 802.3ad mode\n");
+ pr_warn("ad_select param only affects 802.3ad mode\n");
} else {
params->ad_select = BOND_AD_STABLE;
}
if (max_bonds < 0) {
- pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
- max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+ pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
+ max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
max_bonds = BOND_DEFAULT_MAX_BONDS;
}
if (miimon < 0) {
- pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- miimon, INT_MAX);
+ pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ miimon, INT_MAX);
miimon = 0;
}
if (updelay < 0) {
- pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- updelay, INT_MAX);
+ pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
- pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- downdelay, INT_MAX);
+ pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ downdelay, INT_MAX);
downdelay = 0;
}
if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
+ pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
+ use_carrier);
use_carrier = 1;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
- pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
- num_peer_notif);
+ pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
+ num_peer_notif);
num_peer_notif = 1;
}
/* reset values for 802.3ad/TLB/ALB */
if (BOND_NO_USES_ARP(bond_mode)) {
if (!miimon) {
- pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
- pr_warning("Forcing miimon to 100msec\n");
+ pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
+ pr_warn("Forcing miimon to 100msec\n");
miimon = BOND_DEFAULT_MIIMON;
}
}
if (tx_queues < 1 || tx_queues > 255) {
- pr_warning("Warning: tx_queues (%d) should be between "
- "1 and 255, resetting to %d\n",
- tx_queues, BOND_DEFAULT_TX_QUEUES);
+ pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
+ tx_queues, BOND_DEFAULT_TX_QUEUES);
tx_queues = BOND_DEFAULT_TX_QUEUES;
}
if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
- pr_warning("Warning: all_slaves_active module parameter (%d), "
- "not of valid value (0/1), so it was set to "
- "0\n", all_slaves_active);
+ pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
+ all_slaves_active);
all_slaves_active = 0;
}
if (resend_igmp < 0 || resend_igmp > 255) {
- pr_warning("Warning: resend_igmp (%d) should be between "
- "0 and 255, resetting to %d\n",
- resend_igmp, BOND_DEFAULT_RESEND_IGMP);
+ pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
+ resend_igmp, BOND_DEFAULT_RESEND_IGMP);
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
@@ -4142,37 +4133,36 @@ static int bond_check_params(struct bond_params *params)
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
- pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
- updelay, downdelay);
+ pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
+ updelay, downdelay);
}
} else {
/* don't allow arp monitoring */
if (arp_interval) {
- pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
- miimon, arp_interval);
+ pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
+ miimon, arp_interval);
arp_interval = 0;
}
if ((updelay % miimon) != 0) {
- pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- updelay, miimon,
- (updelay / miimon) * miimon);
+ pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ updelay, miimon, (updelay / miimon) * miimon);
}
updelay /= miimon;
if ((downdelay % miimon) != 0) {
- pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
- downdelay, miimon,
- (downdelay / miimon) * miimon);
+ pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
+ downdelay, miimon,
+ (downdelay / miimon) * miimon);
}
downdelay /= miimon;
}
if (arp_interval < 0) {
- pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
- arp_interval, INT_MAX);
+ pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ arp_interval, INT_MAX);
arp_interval = 0;
}
@@ -4183,30 +4173,26 @@ static int bond_check_params(struct bond_params *params)
__be32 ip;
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
- pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
- arp_ip_target[i]);
+ pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
+ arp_ip_target[i]);
arp_interval = 0;
} else {
if (bond_get_targets_ip(arp_target, ip) == -1)
arp_target[arp_ip_count++] = ip;
else
- pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
- &ip);
+ pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
+ &ip);
}
}
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
- pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
- arp_interval);
+ pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
+ arp_interval);
arp_interval = 0;
}
if (arp_validate) {
- if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("arp_validate only supported in active-backup mode\n");
- return -EINVAL;
- }
if (!arp_interval) {
pr_err("arp_validate requires arp_interval\n");
return -EINVAL;
@@ -4248,23 +4234,23 @@ static int bond_check_params(struct bond_params *params)
arp_interval, valptr->string, arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
- pr_info(" %s", arp_ip_target[i]);
+ pr_cont(" %s", arp_ip_target[i]);
- pr_info("\n");
+ pr_cont("\n");
} else if (max_bonds) {
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
- pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
- primary, bond_mode_name(bond_mode));
+ pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
+ primary, bond_mode_name(bond_mode));
primary = NULL;
}
@@ -4293,14 +4279,14 @@ static int bond_check_params(struct bond_params *params)
}
fail_over_mac_value = valptr->value;
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
- pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
+ pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
if (lp_interval == 0) {
- pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
- INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+ pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
}
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 70651f8e8e3b..20659b114f24 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -181,7 +181,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
@@ -207,7 +207,7 @@ static int bond_changelink(struct net_device *bond_dev,
i++;
}
if (i == 0 && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
if (err)
return err;
@@ -216,7 +216,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP validating cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 11cb943222d5..23f365510b58 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/if.h>
#include <linux/netdevice.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/ctype.h>
#include <linux/inet.h>
@@ -47,11 +47,14 @@ static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
};
static struct bond_opt_value bond_arp_validate_tbl[] = {
- { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
- { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
- { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
- { "all", BOND_ARP_VALIDATE_ALL, 0},
- { NULL, -1, 0},
+ { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
+ { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
+ { "all", BOND_ARP_VALIDATE_ALL, 0},
+ { "filter", BOND_ARP_FILTER, 0},
+ { "filter_active", BOND_ARP_FILTER_ACTIVE, 0},
+ { "filter_backup", BOND_ARP_FILTER_BACKUP, 0},
+ { NULL, -1, 0},
};
static struct bond_opt_value bond_arp_all_targets_tbl[] = {
@@ -151,7 +154,8 @@ static struct bond_option bond_opts[] = {
.id = BOND_OPT_ARP_VALIDATE,
.name = "arp_validate",
.desc = "validate src/dst of ARP probes",
- .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
.values = bond_arp_validate_tbl,
.set = bond_option_arp_validate_set
},
@@ -473,10 +477,10 @@ static void bond_opt_error_interpret(struct bonding *bond,
p = strchr(val->string, '\n');
if (p)
*p = '\0';
- pr_err("%s: option %s: invalid value (%s).\n",
+ pr_err("%s: option %s: invalid value (%s)\n",
bond->dev->name, opt->name, val->string);
} else {
- pr_err("%s: option %s: invalid value (%llu).\n",
+ pr_err("%s: option %s: invalid value (%llu)\n",
bond->dev->name, opt->name, val->value);
}
}
@@ -484,7 +488,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
if (!maxval)
break;
- pr_err("%s: option %s: allowed values %llu - %llu.\n",
+ pr_err("%s: option %s: allowed values %llu - %llu\n",
bond->dev->name, opt->name, minval ? minval->value : 0,
maxval->value);
break;
@@ -492,11 +496,11 @@ static void bond_opt_error_interpret(struct bonding *bond,
bond_opt_dep_print(bond, opt);
break;
case -ENOTEMPTY:
- pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+ pr_err("%s: option %s: unable to set because the bond device has slaves\n",
bond->dev->name, opt->name);
break;
case -EBUSY:
- pr_err("%s: option %s: unable to set because the bond device is up.\n",
+ pr_err("%s: option %s: unable to set because the bond device is up\n",
bond->dev->name, opt->name);
break;
default:
@@ -589,7 +593,7 @@ int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.arp_interval = 0;
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
- pr_info("%s: Setting MII monitoring interval to %d.\n",
+ pr_info("%s: Setting MII monitoring interval to %d\n",
bond->dev->name, bond->params.miimon);
}
@@ -636,13 +640,13 @@ int bond_option_active_slave_set(struct bonding *bond,
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
- pr_err("Device %s is not bonding slave.\n",
+ pr_err("Device %s is not bonding slave\n",
slave_dev->name);
return -EINVAL;
}
if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
- pr_err("%s: Device %s is not our slave.\n",
+ pr_err("%s: Device %s is not our slave\n",
bond->dev->name, slave_dev->name);
return -EINVAL;
}
@@ -653,8 +657,7 @@ int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
- pr_info("%s: Clearing current active slave.\n",
- bond->dev->name);
+ pr_info("%s: Clearing current active slave\n", bond->dev->name);
rcu_assign_pointer(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
} else {
@@ -665,16 +668,16 @@ int bond_option_active_slave_set(struct bonding *bond,
if (new_active == old_active) {
/* do nothing */
- pr_info("%s: %s is already the current active slave.\n",
+ pr_info("%s: %s is already the current active slave\n",
bond->dev->name, new_active->dev->name);
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active slave.\n",
+ pr_info("%s: Setting %s as active slave\n",
bond->dev->name, new_active->dev->name);
bond_change_active_slave(bond, new_active);
} else {
- pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
bond->dev->name, new_active->dev->name,
new_active->dev->name);
ret = -EINVAL;
@@ -690,19 +693,19 @@ int bond_option_active_slave_set(struct bonding *bond,
int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
{
- pr_info("%s: Setting MII monitoring interval to %llu.\n",
+ pr_info("%s: Setting MII monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.miimon = newval->value;
if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (newval->value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
@@ -742,9 +745,8 @@ int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.miimon);
}
bond->params.updelay = value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ pr_info("%s: Setting up delay to %d\n",
+ bond->dev->name, bond->params.updelay * bond->params.miimon);
return 0;
}
@@ -767,9 +769,8 @@ int bond_option_downdelay_set(struct bonding *bond,
bond->params.miimon);
}
bond->params.downdelay = value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Setting down delay to %d\n",
+ bond->dev->name, bond->params.downdelay * bond->params.miimon);
return 0;
}
@@ -777,7 +778,7 @@ int bond_option_downdelay_set(struct bonding *bond,
int bond_option_use_carrier_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting use_carrier to %llu.\n",
+ pr_info("%s: Setting use_carrier to %llu\n",
bond->dev->name, newval->value);
bond->params.use_carrier = newval->value;
@@ -787,17 +788,17 @@ int bond_option_use_carrier_set(struct bonding *bond,
int bond_option_arp_interval_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+ pr_info("%s: Setting ARP monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.arp_interval = newval->value;
if (newval->value) {
if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
@@ -812,8 +813,7 @@ int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -856,12 +856,11 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, 0); /* first free slot */
if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
+ pr_err("%s: ARP target table is full!\n", bond->dev->name);
return -EINVAL;
}
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+ pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
_bond_options_arp_ip_target_set(bond, ind, target, jiffies);
@@ -896,17 +895,16 @@ int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, target);
if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
bond->dev->name, &target);
return -EINVAL;
}
if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &target);
+ pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
@@ -954,7 +952,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
else if (newval->string[0] == '-')
ret = bond_option_arp_ip_target_rem(bond, target);
else
- pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
bond->dev->name);
} else {
target = newval->value;
@@ -967,7 +965,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
int bond_option_arp_validate_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_validate to %s (%llu).\n",
+ pr_info("%s: Setting arp_validate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
if (bond->dev->flags & IFF_UP) {
@@ -984,7 +982,7 @@ int bond_option_arp_validate_set(struct bonding *bond,
int bond_option_arp_all_targets_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+ pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.arp_all_targets = newval->value;
@@ -1006,8 +1004,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
*p = '\0';
/* check to see if we are clearing primary */
if (!strlen(primary)) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
@@ -1016,7 +1013,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
+ pr_info("%s: Setting %s as primary slave\n",
bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
strcpy(bond->params.primary, slave->dev->name);
@@ -1026,15 +1023,14 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
}
if (bond->primary_slave) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
}
strncpy(bond->params.primary, primary, IFNAMSIZ);
bond->params.primary[IFNAMSIZ - 1] = 0;
- pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+ pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
bond->dev->name, primary, bond->dev->name);
out:
@@ -1048,7 +1044,7 @@ out:
int bond_option_primary_reselect_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting primary_reselect to %s (%llu).\n",
+ pr_info("%s: Setting primary_reselect to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.primary_reselect = newval->value;
@@ -1064,7 +1060,7 @@ int bond_option_primary_reselect_set(struct bonding *bond,
int bond_option_fail_over_mac_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+ pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.fail_over_mac = newval->value;
@@ -1074,7 +1070,7 @@ int bond_option_fail_over_mac_set(struct bonding *bond,
int bond_option_xmit_hash_policy_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+ pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.xmit_policy = newval->value;
@@ -1084,7 +1080,7 @@ int bond_option_xmit_hash_policy_set(struct bonding *bond,
int bond_option_resend_igmp_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting resend_igmp to %llu.\n",
+ pr_info("%s: Setting resend_igmp to %llu\n",
bond->dev->name, newval->value);
bond->params.resend_igmp = newval->value;
@@ -1158,7 +1154,7 @@ int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
int bond_option_lacp_rate_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting LACP rate to %s (%llu).\n",
+ pr_info("%s: Setting LACP rate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.lacp_fast = newval->value;
bond_3ad_update_lacp_rate(bond);
@@ -1169,7 +1165,7 @@ int bond_option_lacp_rate_set(struct bonding *bond,
int bond_option_ad_select_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting ad_select to %s (%llu).\n",
+ pr_info("%s: Setting ad_select to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.ad_select = newval->value;
@@ -1199,8 +1195,7 @@ int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Check buffer length, valid ifname and queue id */
- if (strlen(newval->string) > IFNAMSIZ ||
- !dev_valid_name(newval->string) ||
+ if (!dev_valid_name(newval->string) ||
qid > bond->dev->real_num_tx_queues)
goto err_no_cmd;
@@ -1232,8 +1227,7 @@ out:
return ret;
err_no_cmd:
- pr_info("invalid input for queue_id set for %s.\n",
- bond->dev->name);
+ pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
ret = -EPERM;
goto out;
@@ -1254,7 +1248,7 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
- pr_info("%s: Interface %s does not exist!\n",
+ pr_info("%s: interface %s does not exist!\n",
bond->dev->name, ifname);
ret = -ENODEV;
goto out;
@@ -1262,12 +1256,12 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
switch (command[0]) {
case '+':
- pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
ret = bond_enslave(bond->dev, dev);
break;
case '-':
- pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
ret = bond_release(bond->dev, dev);
break;
@@ -1279,7 +1273,7 @@ out:
return ret;
err_no_cmd:
- pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
bond->dev->name);
ret = -EPERM;
goto out;
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 3ac20e78eafc..434df7360999 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -254,8 +254,8 @@ void bond_create_proc_entry(struct bonding *bond)
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
- pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
- DRV_NAME, bond_dev->name);
+ pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
@@ -281,8 +281,8 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
- pr_warning("Warning: cannot create /proc/net/%s\n",
- DRV_NAME);
+ pr_warn("Warning: Cannot create /proc/net/%s\n",
+ DRV_NAME);
}
}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 643fcc110299..225ee696db05 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -117,9 +117,9 @@ static ssize_t bonding_store_bonds(struct class *cls,
rv = bond_create(bn->net, ifname);
if (rv) {
if (rv == -EEXIST)
- pr_info("%s already exists.\n", ifname);
+ pr_info("%s already exists\n", ifname);
else
- pr_info("%s creation failed.\n", ifname);
+ pr_info("%s creation failed\n", ifname);
res = rv;
}
} else if (command[0] == '-') {
@@ -144,7 +144,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
return res;
err_no_cmd:
- pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
+ pr_err("no command found in bonding_masters - use +ifname or -ifname\n");
return -EPERM;
}
@@ -1135,7 +1135,7 @@ int bond_create_sysfs(struct bond_net *bn)
/* Is someone being kinky and naming a device bonding_master? */
if (__dev_get_by_name(bn->net,
class_attr_bonding_masters.attr.name))
- pr_err("network device named %s already exists in sysfs",
+ pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 86ccfb9f71cc..430362891d0d 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -188,8 +188,9 @@ struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- unsigned long jiffies;
- unsigned long last_arp_rx;
+ /* all three in jiffies */
+ unsigned long last_link_up;
+ unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
s8 new_link;
@@ -342,6 +343,11 @@ static inline bool bond_is_active_slave(struct slave *slave)
#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
+#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_FILTER)
+#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
+ BOND_ARP_FILTER)
static inline int slave_do_arp_validate(struct bonding *bond,
struct slave *slave)
@@ -349,6 +355,12 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
+static inline int slave_do_arp_validate_only(struct bonding *bond,
+ struct slave *slave)
+{
+ return bond->params.arp_validate & BOND_ARP_FILTER;
+}
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -368,14 +380,10 @@ static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
static inline unsigned long slave_last_rx(struct bonding *bond,
struct slave *slave)
{
- if (slave_do_arp_validate(bond, slave)) {
- if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
- return slave_oldest_target_arp_rx(bond, slave);
- else
- return slave->last_arp_rx;
- }
+ if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+ return slave_oldest_target_arp_rx(bond, slave);
- return slave->dev->last_rx;
+ return slave->last_rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
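
For readers of the bonding.h hunk above, a short sketch of the resulting arp_validate bit layout; the numeric values assume BOND_STATE_ACTIVE == 0 and BOND_STATE_BACKUP == 1 as defined elsewhere in bonding.h (not shown in this hunk).

/* Sketch of the arp_validate bit layout after this hunk, assuming
 * BOND_ARP_VALIDATE_ACTIVE == 0x1 and BOND_ARP_VALIDATE_BACKUP == 0x2
 * (i.e. BOND_STATE_ACTIVE == 0, BOND_STATE_BACKUP == 1):
 *
 *	BOND_ARP_VALIDATE_ACTIVE	0x1
 *	BOND_ARP_VALIDATE_BACKUP	0x2
 *	BOND_ARP_VALIDATE_ALL		0x3
 *	BOND_ARP_FILTER			0x4
 *	BOND_ARP_FILTER_ACTIVE		0x5
 *	BOND_ARP_FILTER_BACKUP		0x6
 *
 * slave_do_arp_validate() tests the per-state validate bits, while the new
 * slave_do_arp_validate_only() tests only the BOND_ARP_FILTER bit, so it is
 * true for every filter* mode regardless of the slave's state.
 */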
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 88a6a5810ec6..fc73865bb83a 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -204,7 +204,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = ser->dev;
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
ret = netif_rx_ni(skb);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 155db68e13ba..ff54c0eb2052 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -554,7 +554,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = cfspi->ndev;
/*
* Push received packet up the stack.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 6efe27458116..1d00b95f8983 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -420,7 +420,11 @@ static void at91_chip_start(struct net_device *dev)
at91_transceiver_switch(priv, 1);
/* enable chip */
- at91_write(priv, AT91_MR, AT91_MR_CANEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
+ else
+ reg_mr = AT91_MR_CANEN;
+ at91_write(priv, AT91_MR, reg_mr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1341,7 +1345,8 @@ static int at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_mode = at91_set_mode;
priv->can.do_get_berr_counter = at91_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
priv->dev = dev;
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index fc59bc6f040b..c0563f183721 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -512,6 +512,30 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
EXPORT_SYMBOL_GPL(alloc_can_skb);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+ memset(*cfd, 0, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
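
The new alloc_canfd_skb() helper mirrors alloc_can_skb() for CAN FD frames. Below is a minimal, hypothetical receive-path sketch showing the intended usage; example_canfd_rx() and its data/len/id parameters stand in for whatever a real driver reads from its hardware and are not part of this patch.

#include <linux/can/dev.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical rx path using the helper added above. */
static void example_canfd_rx(struct net_device *dev, const u8 *data,
			     u8 len, u32 id)
{
	struct net_device_stats *stats = &dev->stats;
	struct canfd_frame *cfd;
	struct sk_buff *skb;

	skb = alloc_canfd_skb(dev, &cfd);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	cfd->can_id = id;
	cfd->len = min_t(u8, len, CANFD_MAX_DLEN);
	memcpy(cfd->data, data, cfd->len);

	stats->rx_packets++;
	stats->rx_bytes += cfd->len;
	netif_rx(skb);	/* hand the frame to the networking core */
}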
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ff2ba86cd4a4..4b18b8765523 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -17,16 +17,9 @@ config CAN_SJA1000_PLATFORM
the "platform bus" (Linux abstraction for directly to the
processor attached devices). Which can be found on various
boards from Phytec (http://www.phytec.de) like the PCM027,
- PCM038.
-
-config CAN_SJA1000_OF_PLATFORM
- tristate "Generic OF Platform Bus based SJA1000 driver"
- depends on OF
- ---help---
- This driver adds support for the SJA1000 chips connected to
- the OpenFirmware "platform bus" found on embedded systems with
- OpenFirmware bindings, e.g. if you have a PowerPC based system
- you may want to enable this option.
+ PCM038. It also supports SJA1000 chips connected to the OpenFirmware
+ "platform bus" found on embedded systems with OpenFirmware bindings,
+ e.g. on PowerPC based systems, where you may want to enable this option.
config CAN_EMS_PCMCIA
tristate "EMS CPC-CARD Card"
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index b3d05cbfec36..531d5fcc97e5 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_CAN_SJA1000) += sja1000.o
obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
-obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index f17c3018b7c7..55cce4737518 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -106,8 +106,7 @@ static int sja1000_probe_chip(struct net_device *dev)
struct sja1000_priv *priv = netdev_priv(dev);
if (priv->reg_base && sja1000_is_absent(priv)) {
- printk(KERN_INFO "%s: probing @0x%lX failed\n",
- DRV_NAME, dev->base_addr);
+ netdev_err(dev, "probing failed\n");
return 0;
}
return -1;
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
deleted file mode 100644
index 2f6e24534231..000000000000
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
- *
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
- * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
- * definition in your flattened device tree source (DTS) file similar to:
- *
- * can@3,100 {
- * compatible = "nxp,sja1000";
- * reg = <3 0x100 0x80>;
- * interrupts = <2 0>;
- * interrupt-parent = <&mpic>;
- * nxp,external-clock-frequency = <16000000>;
- * };
- *
- * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
- * information.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/can/dev.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include "sja1000.h"
-
-#define DRV_NAME "sja1000_of_platform"
-
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
-MODULE_LICENSE("GPL v2");
-
-#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
-
-#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
-#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
-
-static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
-{
- return ioread8(priv->reg_base + reg);
-}
-
-static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
- int reg, u8 val)
-{
- iowrite8(val, priv->reg_base + reg);
-}
-
-static int sja1000_ofp_remove(struct platform_device *ofdev)
-{
- struct net_device *dev = platform_get_drvdata(ofdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
- struct resource res;
-
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
- iounmap(priv->reg_base);
- irq_dispose_mapping(dev->irq);
-
- of_address_to_resource(np, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-
- return 0;
-}
-
-static int sja1000_ofp_probe(struct platform_device *ofdev)
-{
- struct device_node *np = ofdev->dev.of_node;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource res;
- u32 prop;
- int err, irq, res_size;
- void __iomem *base;
-
- err = of_address_to_resource(np, 0, &res);
- if (err) {
- dev_err(&ofdev->dev, "invalid address\n");
- return err;
- }
-
- res_size = resource_size(&res);
-
- if (!request_mem_region(res.start, res_size, DRV_NAME)) {
- dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
- return -EBUSY;
- }
-
- base = ioremap_nocache(res.start, res_size);
- if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
- err = -ENOMEM;
- goto exit_release_mem;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq == 0) {
- dev_err(&ofdev->dev, "no irq found\n");
- err = -ENODEV;
- goto exit_unmap_mem;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_dispose_irq;
- }
-
- priv = netdev_priv(dev);
-
- priv->read_reg = sja1000_ofp_read_reg;
- priv->write_reg = sja1000_ofp_write_reg;
-
- err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
- if (!err)
- priv->ocr |= prop & OCR_MODE_MASK;
- else
- priv->ocr |= OCR_MODE_NORMAL; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
- if (!err)
- priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
- else
- priv->ocr |= OCR_TX0_PULLDOWN; /* default */
-
- err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
- if (!err && prop) {
- u32 divider = priv->can.clock.freq * 2 / prop;
-
- if (divider > 1)
- priv->cdr |= divider / 2 - 1;
- else
- priv->cdr |= CDR_CLKOUT_MASK;
- } else {
- priv->cdr |= CDR_CLK_OFF; /* default */
- }
-
- if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
- priv->cdr |= CDR_CBP; /* default */
-
- priv->irq_flags = IRQF_SHARED;
- priv->reg_base = base;
-
- dev->irq = irq;
-
- dev_info(&ofdev->dev,
- "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
- priv->reg_base, dev->irq, priv->can.clock.freq,
- priv->ocr, priv->cdr);
-
- platform_set_drvdata(ofdev, dev);
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- err = register_sja1000dev(dev);
- if (err) {
- dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
- DRV_NAME, err);
- goto exit_free_sja1000;
- }
-
- return 0;
-
-exit_free_sja1000:
- free_sja1000dev(dev);
-exit_dispose_irq:
- irq_dispose_mapping(irq);
-exit_unmap_mem:
- iounmap(base);
-exit_release_mem:
- release_mem_region(res.start, res_size);
-
- return err;
-}
-
-static struct of_device_id sja1000_ofp_table[] = {
- {.compatible = "nxp,sja1000"},
- {},
-};
-MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
-
-static struct platform_driver sja1000_ofp_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .of_match_table = sja1000_ofp_table,
- },
- .probe = sja1000_ofp_probe,
- .remove = sja1000_ofp_remove,
-};
-
-module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 943df645b459..95a844a7ee7b 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -26,12 +26,16 @@
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include "sja1000.h"
#define DRV_NAME "sja1000_platform"
+#define SP_CAN_CLOCK (16000000 / 2)
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
@@ -66,59 +70,16 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
iowrite8(val, priv->reg_base + reg * 4);
}
-static int sp_probe(struct platform_device *pdev)
+static void sp_populate(struct sja1000_priv *priv,
+ struct sja1000_platform_data *pdata,
+ unsigned long resource_mem_flags)
{
- int err;
- void __iomem *addr;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource *res_mem, *res_irq;
- struct sja1000_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data provided!\n");
- err = -ENODEV;
- goto exit;
- }
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_mem || !res_irq) {
- err = -ENODEV;
- goto exit;
- }
-
- if (!request_mem_region(res_mem->start, resource_size(res_mem),
- DRV_NAME)) {
- err = -EBUSY;
- goto exit;
- }
-
- addr = ioremap_nocache(res_mem->start, resource_size(res_mem));
- if (!addr) {
- err = -ENOMEM;
- goto exit_release;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_iounmap;
- }
- priv = netdev_priv(dev);
-
- dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
- if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
- priv->irq_flags |= IRQF_SHARED;
- priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
- switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ switch (resource_mem_flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
priv->read_reg = sp_read_reg32;
priv->write_reg = sp_write_reg32;
@@ -133,6 +94,124 @@ static int sp_probe(struct platform_device *pdev)
priv->write_reg = sp_write_reg8;
break;
}
+}
+
+static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
+{
+ int err;
+ u32 prop;
+
+ err = of_property_read_u32(of, "reg-io-width", &prop);
+ if (err)
+ prop = 1; /* 8 bit is default */
+
+ switch (prop) {
+ case 4:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case 2:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case 1: /* fallthrough */
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ }
+
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
+ if (!err)
+ priv->ocr |= prop & OCR_MODE_MASK;
+ else
+ priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-config", &prop);
+ if (!err)
+ priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ else
+ priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+ err = of_property_read_u32(of, "nxp,clock-out-frequency", &prop);
+ if (!err && prop) {
+ u32 divider = priv->can.clock.freq * 2 / prop;
+
+ if (divider > 1)
+ priv->cdr |= divider / 2 - 1;
+ else
+ priv->cdr |= CDR_CLKOUT_MASK;
+ } else {
+ priv->cdr |= CDR_CLK_OFF; /* default */
+ }
+
+ if (!of_property_read_bool(of, "nxp,no-comparator-bypass"))
+ priv->cdr |= CDR_CBP; /* default */
+}
+
+static int sp_probe(struct platform_device *pdev)
+{
+ int err, irq = 0;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource *res_mem, *res_irq = NULL;
+ struct sja1000_platform_data *pdata;
+ struct device_node *of = pdev->dev.of_node;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata && !of) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ return -ENODEV;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem)
+ return -ENODEV;
+
+ if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+ resource_size(res_mem), DRV_NAME))
+ return -EBUSY;
+
+ addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+ resource_size(res_mem));
+ if (!addr)
+ return -ENOMEM;
+
+ if (of)
+ irq = irq_of_parse_and_map(of, 0);
+ else
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!irq && !res_irq)
+ return -ENODEV;
+
+ dev = alloc_sja1000dev(0);
+ if (!dev)
+ return -ENOMEM;
+ priv = netdev_priv(dev);
+
+ if (res_irq) {
+ irq = res_irq->start;
+ priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ priv->irq_flags |= IRQF_SHARED;
+ } else {
+ priv->irq_flags = IRQF_SHARED;
+ }
+
+ dev->irq = irq;
+ priv->reg_base = addr;
+
+ if (of)
+ sp_populate_of(priv, of);
+ else
+ sp_populate(priv, pdata, res_mem->flags);
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -150,39 +229,32 @@ static int sp_probe(struct platform_device *pdev)
exit_free:
free_sja1000dev(dev);
- exit_iounmap:
- iounmap(addr);
- exit_release:
- release_mem_region(res_mem->start, resource_size(res_mem));
- exit:
return err;
}
static int sp_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct resource *res;
unregister_sja1000dev(dev);
-
- if (priv->reg_base)
- iounmap(priv->reg_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
free_sja1000dev(dev);
return 0;
}
+static struct of_device_id sp_of_table[] = {
+ {.compatible = "nxp,sja1000"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sp_of_table);
+
static struct platform_driver sp_driver = {
.probe = sp_probe,
.remove = sp_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = sp_of_table,
},
};
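
With the OF driver folded in above, the device tree example that used to live in the deleted sja1000_of_platform.c header applies to sja1000_platform.c instead. A lightly adapted copy is kept here for reference; the reg and interrupt values are illustrative, and reg-io-width is the optional property now parsed by sp_populate_of().

/* Illustrative flattened device tree node for the merged driver:
 *
 *	can@3,100 {
 *		compatible = "nxp,sja1000";
 *		reg = <3 0x100 0x80>;
 *		interrupts = <2 0>;
 *		interrupt-parent = <&mpic>;
 *		reg-io-width = <1>;
 *		nxp,external-clock-frequency = <16000000>;
 *	};
 *
 * See Documentation/devicetree/bindings/net/can/sja1000.txt for the
 * complete binding.
 */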
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 6c859bba8b65..e77d11049747 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
return err;
dev->nchannels = msg.u.cardinfo.nchannels;
+ if (dev->nchannels > MAX_NET_DEVICES)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bd8f84b0b894..1656317c96f8 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -88,16 +88,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- int i;
- dev->dstats = alloc_percpu(struct pcpu_dstats);
+ dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_dstats *dstats;
- dstats = per_cpu_ptr(dev->dstats, i);
- u64_stats_init(&dstats->syncp);
- }
return 0;
}
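
The open-coded per-cpu allocation and u64_stats_init() loop removed above is now handled by netdev_alloc_pcpu_stats(). As a rough sketch (not the exact <linux/netdevice.h> definition, which is a type-generic macro), the helper behaves like:

/* Approximate, non-authoritative expansion of
 * netdev_alloc_pcpu_stats(struct pcpu_dstats), for illustration only.
 */
static struct pcpu_dstats __percpu *example_alloc_dstats(void)
{
	struct pcpu_dstats __percpu *stats = alloc_percpu(struct pcpu_dstats);
	int cpu;

	if (!stats)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct pcpu_dstats *s = per_cpu_ptr(stats, cpu);

		u64_stats_init(&s->syncp);
	}
	return stats;
}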
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5992860a39c9..063557e037f2 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -1,23 +1,24 @@
-/*======================================================================
-
- A PCMCIA ethernet driver for the 3com 3c589 card.
-
- Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
-
- 3c589_cs.c 1.162 2001/10/13 00:08:50
-
- The network driver code is based on Donald Becker's 3c589 code:
-
- Written 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
- Donald Becker may be reached at becker@scyld.com
-
- Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-======================================================================*/
+/* ======================================================================
+ *
+ * A PCMCIA ethernet driver for the 3com 3c589 card.
+ *
+ * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+ *
+ * 3c589_cs.c 1.162 2001/10/13 00:08:50
+ *
+ * The network driver code is based on Donald Becker's 3c589 code:
+ *
+ * Written 1994 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU General Public License,
+ * incorporated herein by reference.
+ * Donald Becker may be reached at becker@scyld.com
+ *
+ * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * ======================================================================
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,18 +42,20 @@
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
/* To minimize the size of the driver source I only define operating
- constants if they are used several times. You'll need the manual
- if you want to understand driver details. */
+ * constants if they are used several times. You'll need the manual
+ * if you want to understand driver details.
+ */
+
/* Offsets from base I/O address. */
#define EL3_DATA 0x00
#define EL3_TIMER 0x0a
@@ -65,7 +68,9 @@
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
/* The top five bits written to EL3_CMD are a command, the lower
- 11 bits are the parameter, if applicable. */
+ * 11 bits are the parameter, if applicable.
+ */
+
enum c509cmd {
TotalReset = 0<<11,
SelectWindow = 1<<11,
@@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = {
static int tc589_probe(struct pcmcia_device *link)
{
- struct el3_private *lp;
- struct net_device *dev;
+ struct el3_private *lp;
+ struct net_device *dev;
- dev_dbg(&link->dev, "3c589_attach()\n");
+ dev_dbg(&link->dev, "3c589_attach()\n");
- /* Create new ethernet device */
- dev = alloc_etherdev(sizeof(struct el3_private));
- if (!dev)
- return -ENOMEM;
- lp = netdev_priv(dev);
- link->priv = dev;
- lp->p_dev = link;
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ link->priv = dev;
+ lp->p_dev = link;
- spin_lock_init(&lp->lock);
- link->resource[0]->end = 16;
- link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ spin_lock_init(&lp->lock);
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
- link->config_flags |= CONF_ENABLE_IRQ;
- link->config_index = 1;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
- dev->netdev_ops = &el3_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->netdev_ops = &el3_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- return tc589_config(link);
+ return tc589_config(link);
}
static void tc589_detach(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
+ struct net_device *dev = link->priv;
- dev_dbg(&link->dev, "3c589_detach\n");
+ dev_dbg(&link->dev, "3c589_detach\n");
- unregister_netdev(dev);
+ unregister_netdev(dev);
- tc589_release(link);
+ tc589_release(link);
- free_netdev(dev);
+ free_netdev(dev);
} /* tc589_detach */
static int tc589_config(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
- __be16 *phys_addr;
- int ret, i, j, multi = 0, fifo;
- unsigned int ioaddr;
- static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
- u8 *buf;
- size_t len;
-
- dev_dbg(&link->dev, "3c589_config\n");
-
- phys_addr = (__be16 *)dev->dev_addr;
- /* Is this a 3c562? */
- if (link->manf_id != MANFID_3COM)
- dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
- multi = (link->card_id == PRODID_3COM_3C562);
-
- link->io_lines = 16;
-
- /* For the 3c562, the base address must be xx00-xx7f */
- for (i = j = 0; j < 0x400; j += 0x10) {
- if (multi && (j & 0x80)) continue;
- link->resource[0]->start = j ^ 0x300;
- i = pcmcia_request_io(link);
- if (i == 0)
- break;
- }
- if (i != 0)
- goto failed;
-
- ret = pcmcia_request_irq(link, el3_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- ioaddr = dev->base_addr;
- EL3WINDOW(0);
-
- /* The 3c589 has an extra EEPROM for configuration info, including
- the hardware address. The 3c562 puts the address in the CIS. */
- len = pcmcia_get_tuple(link, 0x88, &buf);
- if (buf && len >= 6) {
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
- kfree(buf);
- } else {
- kfree(buf); /* 0 < len < 6 */
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
- dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
- dev->base_addr, dev->base_addr+15);
- goto failed;
+ struct net_device *dev = link->priv;
+ __be16 *phys_addr;
+ int ret, i, j, multi = 0, fifo;
+ unsigned int ioaddr;
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ u8 *buf;
+ size_t len;
+
+ dev_dbg(&link->dev, "3c589_config\n");
+
+ phys_addr = (__be16 *)dev->dev_addr;
+ /* Is this a 3c562? */
+ if (link->manf_id != MANFID_3COM)
+ dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
+ multi = (link->card_id == PRODID_3COM_3C562);
+
+ link->io_lines = 16;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80))
+ continue;
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
+ if (i == 0)
+ break;
}
- }
-
- /* The address and resource configuration register aren't loaded from
- the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
- outw(0x3f00, ioaddr + 8);
- fifo = inl(ioaddr);
-
- /* The if_port symbol can be set when the module is loaded */
- if ((if_port >= 0) && (if_port <= 3))
- dev->if_port = if_port;
- else
- dev_err(&link->dev, "invalid if_port requested\n");
-
- SET_NETDEV_DEV(dev, &link->dev);
-
- if (register_netdev(dev) != 0) {
- dev_err(&link->dev, "register_netdev() failed\n");
- goto failed;
- }
-
- netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
- (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
- return 0;
+ if (i != 0)
+ goto failed;
+
+ ret = pcmcia_request_irq(link, el3_interrupt);
+ if (ret)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ * the hardware address. The 3c562 puts the address in the CIS.
+ */
+ len = pcmcia_get_tuple(link, 0x88, &buf);
+ if (buf && len >= 6) {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ kfree(buf);
+ } else {
+ kfree(buf); /* 0 < len < 6 */
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == htons(0x6060)) {
+ dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ /* The address and resource configuration register aren't loaded from
+ * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
+ */
+
+ outw(0x3f00, ioaddr + 8);
+ fifo = inl(ioaddr);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ dev_err(&link->dev, "invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ dev_err(&link->dev, "register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
+ return 0;
failed:
- tc589_release(link);
- return -ENODEV;
+ tc589_release(link);
+ return -ENODEV;
} /* tc589_config */
static void tc589_release(struct pcmcia_device *link)
@@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link)
/*====================================================================*/
-/*
- Use this for commands that may take time to finish
-*/
+/* Use this for commands that may take time to finish */
+
static void tc589_wait_for_completion(struct net_device *dev, int cmd)
{
- int i = 100;
- outw(cmd, dev->base_addr + EL3_CMD);
- while (--i > 0)
- if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
- if (i == 0)
- netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000))
+ break;
+ if (i == 0)
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
-/*
- Read a word from the EEPROM using the regular EEPROM access register.
- Assume that we are in register window zero.
-*/
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ * Assume that we are in register window zero.
+ */
+
static u16 read_eeprom(unsigned int ioaddr, int index)
{
- int i;
- outw(EEPROM_READ + index, ioaddr + 10);
- /* Reading the eeprom takes 162 us */
- for (i = 1620; i >= 0; i--)
- if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
- break;
- return inw(ioaddr + 12);
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
}
-/*
- Set transceiver type, perhaps to something other than what the user
- specified in dev->if_port.
-*/
+/* Set transceiver type, perhaps to something other than what the user
+ * specified in dev->if_port.
+ */
+
static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
-
- EL3WINDOW(0);
- switch (if_port) {
- case 0: case 1: outw(0, ioaddr + 6); break;
- case 2: outw(3<<14, ioaddr + 6); break;
- case 3: outw(1<<14, ioaddr + 6); break;
- }
- /* On PCMCIA, this just turns on the LED */
- outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
- /* 10baseT interface, enable link beat and jabber check. */
- EL3WINDOW(4);
- outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
- EL3WINDOW(1);
- if (if_port == 2)
- lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
- else
- lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0:
+ case 1:
+ outw(0, ioaddr + 6);
+ break;
+ case 2:
+ outw(3<<14, ioaddr + 6);
+ break;
+ case 3:
+ outw(1<<14, ioaddr + 6);
+ break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
}
static void dump_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- EL3WINDOW(1);
- netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
- inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
- inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
- EL3WINDOW(4);
- netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
- inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
- inw(ioaddr+0x0a));
- EL3WINDOW(1);
+ unsigned int ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
+ EL3WINDOW(1);
}
/* Reset and restore all of the 3c589 registers. */
static void tc589_reset(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
- outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
- /* Set the station address in window 2. */
- EL3WINDOW(2);
- for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + i);
-
- tc589_set_xcvr(dev, dev->if_port);
-
- /* Switch to the stats window, and clear all stats by reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- EL3WINDOW(6);
- for (i = 0; i < 9; i++)
- inb(ioaddr+i);
- inw(ioaddr + 10);
- inw(ioaddr + 12);
-
- /* Switch to register set 1 for normal use. */
- EL3WINDOW(1);
-
- set_rx_mode(dev);
- outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
- outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
- outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
- /* Allow status bits to be seen. */
- outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
- /* Ack all pending events, and set active indicator mask. */
- outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
ioaddr + EL3_CMD);
- outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
| AdapterFailure, ioaddr + EL3_CMD);
}
@@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static int el3_config(struct net_device *dev, struct ifmap *map)
{
- if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
- if (map->port <= 3) {
- dev->if_port = map->port;
- netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
- tc589_set_xcvr(dev, dev->if_port);
- } else
- return -EINVAL;
- }
- return 0;
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else {
+ return -EINVAL;
+ }
+ }
+ return 0;
}
static int el3_open(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
- if (!pcmcia_dev_present(link))
- return -ENODEV;
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
- link->open++;
- netif_start_queue(dev);
+ link->open++;
+ netif_start_queue(dev);
- tc589_reset(dev);
- init_timer(&lp->media);
- lp->media.function = media_check;
- lp->media.data = (unsigned long) dev;
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ tc589_reset(dev);
+ init_timer(&lp->media);
+ lp->media.function = media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
- dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
+ dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
- return 0;
+ return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_warn(dev, "Transmit timed out!\n");
- dump_status(dev);
- dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
- /* Issue TX_RESET and TX_START commands. */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_warn(dev, "Transmit timed out!\n");
+ dump_status(dev);
+ dev->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
static void pop_tx_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- /* Clear the Tx status stack. */
- for (i = 32; i > 0; i--) {
- u_char tx_status = inb(ioaddr + TX_STATUS);
- if (!(tx_status & 0x84)) break;
- /* reset transmitter on jabber error or underrun */
- if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
- if (tx_status & 0x38) {
- netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84))
+ break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
- outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
- }
}
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- struct el3_private *priv = netdev_priv(dev);
- unsigned long flags;
+ unsigned int ioaddr = dev->base_addr;
+ struct el3_private *priv = netdev_priv(dev);
+ unsigned long flags;
- netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
(long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
- /* Put out the doubleword header... */
- outw(skb->len, ioaddr + TX_FIFO);
- outw(0x00, ioaddr + TX_FIFO);
- /* ... and the packet rounded to a doubleword. */
- outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- if (inw(ioaddr + TX_FREE) <= 1536) {
- netif_stop_queue(dev);
- /* Interrupt us when the FIFO has room for max-sized packet. */
- outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
- }
+ if (inw(ioaddr + TX_FREE) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
- pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_kfree_skb(skb);
+ pop_tx_status(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* The EL3 interrupt handler. */
static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr;
- __u16 status;
- int i = 0, handled = 1;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ __u16 status;
+ int i = 0, handled = 1;
- if (!netif_device_present(dev))
- return IRQ_NONE;
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
- ioaddr = dev->base_addr;
+ ioaddr = dev->base_addr;
- netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
- while ((status = inw(ioaddr + EL3_STATUS)) &
+ spin_lock(&lp->lock);
+ while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
- if ((status & 0xe000) != 0x2000) {
- netdev_dbg(dev, "interrupt from dead card\n");
- handled = 0;
- break;
- }
- if (status & RxComplete)
- el3_rx(dev);
- if (status & TxAvailable) {
- netdev_dbg(dev, " TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
- }
- if (status & TxComplete)
- pop_tx_status(dev);
- if (status & (AdapterFailure | RxEarly | StatsFull)) {
- /* Handle all uncommon interrupts. */
- if (status & StatsFull) /* Empty statistics. */
- update_stats(dev);
- if (status & RxEarly) { /* Rx early is unused. */
- el3_rx(dev);
- outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
- if (status & AdapterFailure) {
- u16 fifo_diag;
- EL3WINDOW(4);
- fifo_diag = inw(ioaddr + 4);
- EL3WINDOW(1);
- netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ if ((status & 0xe000) != 0x2000) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
+ }
+ if (status & RxComplete)
+ el3_rx(dev);
+ if (status & TxAvailable) {
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+ if (status & TxComplete)
+ pop_tx_status(dev);
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) {
+ /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
fifo_diag);
- if (fifo_diag & 0x0400) {
- /* Tx overrun */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc589_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
}
- if (fifo_diag & 0x2000) {
- /* Rx underrun */
- tc589_wait_for_completion(dev, RxReset);
- set_rx_mode(dev);
- outw(RxEnable, ioaddr + EL3_CMD);
+ if (++i > 10) {
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
- outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
- }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
- if (++i > 10) {
- netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
- status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
- }
- /* Acknowledge the IRQ. */
- outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
- }
- lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
- inw(ioaddr + EL3_STATUS));
- return IRQ_RETVAL(handled);
+ lp->last_irq = jiffies;
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
+ return IRQ_RETVAL(handled);
}
static void media_check(unsigned long arg)
{
- struct net_device *dev = (struct net_device *)(arg);
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
- u16 media, errs;
- unsigned long flags;
+ struct net_device *dev = (struct net_device *)(arg);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 media, errs;
+ unsigned long flags;
- if (!netif_device_present(dev)) goto reschedule;
+ if (!netif_device_present(dev))
+ goto reschedule;
- /* Check for pending interrupt with expired latency timer: with
- this, we can limp along even if the interrupt is blocked */
- if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ /* Check for pending interrupt with expired latency timer: with
+ * this, we can limp along even if the interrupt is blocked
+ */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
- if (!lp->fast_poll)
- netdev_warn(dev, "interrupt(s) dropped!\n");
-
- local_irq_save(flags);
- el3_interrupt(dev->irq, dev);
- local_irq_restore(flags);
-
- lp->fast_poll = HZ;
- }
- if (lp->fast_poll) {
- lp->fast_poll--;
- lp->media.expires = jiffies + HZ/100;
- add_timer(&lp->media);
- return;
- }
-
- /* lp->lock guards the EL3 window. Window should always be 1 except
- when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
- EL3WINDOW(4);
- media = inw(ioaddr+WN4_MEDIA) & 0xc810;
-
- /* Ignore collisions unless we've had no irq's recently */
- if (time_before(jiffies, lp->last_irq + HZ)) {
- media &= ~0x0010;
- } else {
- /* Try harder to detect carrier errors */
- EL3WINDOW(6);
- outw(StatsDisable, ioaddr + EL3_CMD);
- errs = inb(ioaddr + 0);
- outw(StatsEnable, ioaddr + EL3_CMD);
- dev->stats.tx_carrier_errors += errs;
- if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
- }
+ if (!lp->fast_poll)
+ netdev_warn(dev, "interrupt(s) dropped!\n");
+
+ local_irq_save(flags);
+ el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ /* lp->lock guards the EL3 window. Window should always be 1 except
+ * when the lock is held
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ EL3WINDOW(4);
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (time_before(jiffies, lp->last_irq + HZ)) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010))
+ media |= 0x0010;
+ }
- if (media != lp->media_status) {
- if ((media & lp->media_status & 0x8000) &&
- ((lp->media_status ^ media) & 0x0800))
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
netdev_info(dev, "%s link beat\n",
- (lp->media_status & 0x0800 ? "lost" : "found"));
- else if ((media & lp->media_status & 0x4000) &&
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
netdev_info(dev, "coax cable %s\n",
- (lp->media_status & 0x0010 ? "ok" : "problem"));
- if (dev->if_port == 0) {
- if (media & 0x8000) {
- if (media & 0x0800)
- netdev_info(dev, "flipped to 10baseT\n");
- else
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ netdev_info(dev, "flipped to 10baseT\n");
+ else
tc589_set_xcvr(dev, 2);
- } else if (media & 0x4000) {
- if (media & 0x0010)
- tc589_set_xcvr(dev, 1);
- else
- netdev_info(dev, "flipped to 10base2\n");
- }
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ netdev_info(dev, "flipped to 10base2\n");
+ }
+ }
+ lp->media_status = media;
}
- lp->media_status = media;
- }
- EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
}
static struct net_device_stats *el3_get_stats(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned long flags;
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ struct pcmcia_device *link = lp->p_dev;
- if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
- update_stats(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return &dev->stats;
+ if (pcmcia_dev_present(link)) {
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return &dev->stats;
}
-/*
- Update statistics. We change to register window 6, so this should be run
- single-threaded if the device is active. This is expected to be a rare
- operation, and it's simpler for the rest of the driver to assume that
- window 1 is always valid rather than use a special window-state variable.
-
- Caller must hold the lock for this
+/* Update statistics. We change to register window 6, so this should be run
+ * single-threaded if the device is active. This is expected to be a rare
+ * operation, and it's simpler for the rest of the driver to assume that
+ * window 1 is always valid rather than use a special window-state variable.
+ *
+ * Caller must hold the lock for this
*/
+
static void update_stats(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_dbg(dev, "updating the statistics.\n");
- /* Turn off statistics updates while reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- /* Switch to the stats window, and read everything. */
- EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
- dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
- dev->stats.collisions += inb(ioaddr + 3);
- dev->stats.tx_window_errors += inb(ioaddr + 4);
- dev->stats.rx_fifo_errors += inb(ioaddr + 5);
- dev->stats.tx_packets += inb(ioaddr + 6);
- /* Rx packets */ inb(ioaddr + 7);
- /* Tx deferrals */ inb(ioaddr + 8);
- /* Rx octets */ inw(ioaddr + 10);
- /* Tx octets */ inw(ioaddr + 12);
-
- /* Back to window 1, and turn statistics back on. */
- EL3WINDOW(1);
- outw(StatsEnable, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_dbg(dev, "updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */
+ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */
+ inb(ioaddr + 7);
+ /* Tx deferrals */
+ inb(ioaddr + 8);
+ /* Rx octets */
+ inw(ioaddr + 10);
+ /* Tx octets */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
}
static int el3_rx(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int worklimit = 32;
- short rx_status;
+ unsigned int ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
- netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
- while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
- worklimit--;
- if (rx_status & 0x4000) { /* Error, update stats. */
- short error = rx_status & 0x3800;
- dev->stats.rx_errors++;
- switch (error) {
- case 0x0000: dev->stats.rx_over_errors++; break;
- case 0x0800: dev->stats.rx_length_errors++; break;
- case 0x1000: dev->stats.rx_frame_errors++; break;
- case 0x1800: dev->stats.rx_length_errors++; break;
- case 0x2000: dev->stats.rx_frame_errors++; break;
- case 0x2800: dev->stats.rx_crc_errors++; break;
- }
- } else {
- short pkt_len = rx_status & 0x7ff;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 5);
-
- netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ worklimit--;
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ dev->stats.rx_errors++;
+ switch (error) {
+ case 0x0000:
+ dev->stats.rx_over_errors++;
+ break;
+ case 0x0800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x1000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x1800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x2000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x2800:
+ dev->stats.rx_crc_errors++;
+ break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
- if (skb != NULL) {
- skb_reserve(skb, 2);
- insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
(pkt_len+3)>>2);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- } else {
- netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
pkt_len);
- dev->stats.rx_dropped++;
- }
+ dev->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
}
- /* Pop the top of the Rx FIFO */
- tc589_wait_for_completion(dev, RxDiscard);
- }
- if (worklimit == 0)
- netdev_warn(dev, "too much work in el3_rx!\n");
- return 0;
+ if (worklimit == 0)
+ netdev_warn(dev, "too much work in el3_rx!\n");
+ return 0;
}
static void set_rx_mode(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- u16 opts = SetRxFilter | RxStation | RxBroadcast;
-
- if (dev->flags & IFF_PROMISC)
- opts |= RxMulticast | RxProm;
- else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
- opts |= RxMulticast;
- outw(opts, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+ u16 opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
}
static void set_multicast_list(struct net_device *dev)
@@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev)
static int el3_close(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
- unsigned int ioaddr = dev->base_addr;
-
- dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+ unsigned int ioaddr = dev->base_addr;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ if (pcmcia_dev_present(link)) {
+ /* Turn off statistics ASAP. We update dev->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
- if (pcmcia_dev_present(link)) {
- /* Turn off statistics ASAP. We update dev->stats below. */
- outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
- /* Disable the receiver and transmitter. */
- outw(RxDisable, ioaddr + EL3_CMD);
- outw(TxDisable, ioaddr + EL3_CMD);
-
- if (dev->if_port == 2)
- /* Turn off thinnet power. Green! */
- outw(StopCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == 1) {
- /* Disable link beat and jabber */
- EL3WINDOW(4);
- outw(0, ioaddr + WN4_MEDIA);
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
}
- /* Switching back to window 0 disables the IRQ. */
- EL3WINDOW(0);
- /* But we explicitly zero the IRQ line select anyway. */
- outw(0x0f00, ioaddr + WN0_IRQ);
-
- /* Check if the card still exists */
- if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
- update_stats(dev);
- }
-
- link->open--;
- netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
- return 0;
+ return 0;
}
static const struct pcmcia_device_id tc589_ids[] = {
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 9339cccfe05a..2ae00ed83afa 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
struct pcnet32_rx_head *new_rx_ring;
struct sk_buff **new_skb_list;
int new, overlap;
+ unsigned int entries = 1 << size;
new_rx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ entries,
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
- memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+ memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
- new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
if (!new_dma_addr_list)
goto free_new_rx_ring;
- new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
if (!new_skb_list)
goto free_new_lists;
/* first copy the current receive buffers */
- overlap = min(size, lp->rx_ring_size);
+ overlap = min(entries, lp->rx_ring_size);
for (new = 0; new < overlap; new++) {
new_rx_ring[new] = lp->rx_ring[new];
new_dma_addr_list[new] = lp->rx_dma_addr[new];
new_skb_list[new] = lp->rx_skbuff[new];
}
/* now allocate any new buffers needed */
- for (; new < size; new++) {
+ for (; new < entries; new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new])) {
+ netif_err(lp, drv, dev, "%s dma mapping failed\n",
+ __func__);
+ dev_kfree_skb(new_skb_list[new]);
+ goto free_all_new;
+ }
new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[new]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[new]);
}
}
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
lp->rx_ring_size, lp->rx_ring,
lp->rx_ring_dma_addr);
- lp->rx_ring_size = (1 << size);
+ lp->rx_ring_size = entries;
lp->rx_mod_mask = lp->rx_ring_size - 1;
lp->rx_len_bits = (size << 4);
lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
free_all_new:
while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) {
- pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new]))
+ pci_unmap_single(lp->pci_dev,
+ new_dma_addr_list[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(new_skb_list[new]);
}
}
@@ -634,8 +649,7 @@ free_new_lists:
kfree(new_dma_addr_list);
free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
- sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ sizeof(struct pcnet32_rx_head) * entries,
new_rx_ring,
new_ring_dma_addr);
}
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
lp->rx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[i],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]);
}
lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->tx_dma_addr[x] =
pci_map_single(lp->pci_dev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "DMA mapping error at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
+ dma_addr_t new_dma_addr;
newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ /*
+ * Map the new buffer; if mapping fails, drop the packet and
+ * reuse the old buffer.
+ */
if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
- skb = lp->rx_skbuff[entry];
- pci_unmap_single(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- skb_put(skb, pkt_len);
- lp->rx_skbuff[entry] = newskb;
- lp->rx_dma_addr[entry] =
- pci_map_single(lp->pci_dev,
- newskb->data,
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
- rx_in_place = 1;
+ new_dma_addr = pci_map_single(lp->pci_dev,
+ newskb->data,
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+ netif_err(lp, rx_err, dev,
+ "DMA mapping error.\n");
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ } else {
+ skb = lp->rx_skbuff[entry];
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ lp->rx_dma_addr[entry] = new_dma_addr;
+ rxp->base = cpu_to_le32(new_dma_addr);
+ rx_in_place = 1;
+ }
} else
skb = NULL;
} else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->tx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->tx_skbuff[i]);
}
lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
}
rmb();
- if (lp->rx_dma_addr[i] == 0)
+ if (lp->rx_dma_addr[i] == 0) {
lp->rx_dma_addr[i] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i])) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev,
+ "%s pci dma mapping error\n",
+ __func__);
+ return -1;
+ }
+ }
lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
wmb(); /* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_ring[entry].misc = 0x00000000;
- lp->tx_skbuff[entry] = skb;
lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ goto drop_packet;
+ }
+ lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_full = 1;
netif_stop_queue(dev);
}
+drop_packet:
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
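The pcnet32 hunks above all apply the same defensive pattern: check pci_dma_mapping_error() immediately after pci_map_single(), and only unmap addresses that actually mapped before handing them back or freeing the skb. The following is a minimal, self-contained sketch of that pattern; map_one_buffer() and unmap_one_buffer() are hypothetical helpers for illustration, not part of the driver.
/* Sketch of the DMA-mapping error-check pattern used above; not driver code. */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static int map_one_buffer(struct pci_dev *pdev, struct sk_buff *skb,
                          dma_addr_t *dma)
{
        *dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, *dma)) {
                /* Mapping failed: free the skb and report the error;
                 * never hand an unmapped address to the device.
                 */
                dev_kfree_skb(skb);
                return -ENOMEM;
        }
        return 0;
}

static void unmap_one_buffer(struct pci_dev *pdev, struct sk_buff *skb,
                             dma_addr_t dma)
{
        /* Only unmap addresses that were successfully mapped. */
        if (!pci_dma_mapping_error(pdev, dma))
                pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);
}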
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 3f97d9fd0a71..85dbddd03722 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -60,6 +60,17 @@ config BCM63XX_ENET
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
+config BCMGENET
+ tristate "Broadcom GENET internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if BCMGENET=y
+ select BCM7XXX_PHY
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx set-top box chipset family.
+
config BNX2
tristate "Broadcom NetXtremeII support"
depends on PCI
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 68efa1a3fb88..fd639a0d4c7d 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index cda25ac45b47..ca6b36220d94 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6206,7 +6206,7 @@ bnx2_free_irq(struct bnx2 *bp)
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
- int i, total_vecs, rc;
+ int i, total_vecs;
struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
struct net_device *dev = bp->dev;
const int len = sizeof(bp->irq_tbl[0].name);
@@ -6229,16 +6229,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
#ifdef BCM_CNIC
total_vecs++;
#endif
- rc = -ENOSPC;
- while (total_vecs >= BNX2_MIN_MSIX_VEC) {
- rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
- if (rc <= 0)
- break;
- if (rc > 0)
- total_vecs = rc;
- }
-
- if (rc != 0)
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+ BNX2_MIN_MSIX_VEC, total_vecs);
+ if (total_vecs < 0)
return;
msix_vecs = total_vecs;
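This hunk (and the bnx2x_enable_msix() hunk further down) replaces the open-coded pci_enable_msix() retry loop with pci_enable_msix_range(), which negotiates the vector count in a single call. Below is a hedged sketch of the calling convention, assuming a generic PCI driver context; my_enable_msix(), MY_MIN_VEC and MY_MAX_VEC are placeholders, not names from these drivers.
/* Sketch of the pci_enable_msix_range() idiom used above; not driver code. */
#include <linux/pci.h>

#define MY_MAX_VEC 8
#define MY_MIN_VEC 2

static int my_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl)
{
        int i, rc;

        for (i = 0; i < MY_MAX_VEC; i++)
                tbl[i].entry = i;

        /* Returns the number of vectors actually allocated, somewhere in
         * [MY_MIN_VEC, MY_MAX_VEC], or a negative errno (e.g. -ENOSPC)
         * if even MY_MIN_VEC cannot be granted.
         */
        rc = pci_enable_msix_range(pdev, tbl, MY_MIN_VEC, MY_MAX_VEC);
        if (rc < 0)
                return rc;      /* fall back to MSI or INTx */

        /* rc may be smaller than requested: size the queues accordingly. */
        return rc;
}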
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 391f29ef6d2e..722160940ab9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.17-0"
-#define DRV_MODULE_RELDATE "2013/04/11"
+#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
#define DP(__mask, fmt, ...) \
do { \
if (unlikely(bp->msg_enable & (__mask))) \
- pr_notice("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__VA_ARGS__); \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
} while (0)
#define DP_CONT(__mask, fmt, ...) \
@@ -1261,6 +1270,7 @@ struct bnx2x_slowpath {
union {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
} q_rdata;
union {
@@ -1392,7 +1402,7 @@ struct bnx2x_fw_stats_data {
};
/* Public slow path states */
-enum {
+enum sp_rtnl_flag {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
BNX2X_SP_RTNL_FAN_FAILURE,
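The new DP_AND() macro above differs from DP() only in its mask test: DP() fires when any of the requested bits are set in bp->msg_enable, while DP_AND() fires only when all of them are. A standalone userspace illustration of the two tests, with made-up mask values:
/* Illustration of the DP() vs DP_AND() mask tests; values are made up. */
#include <stdio.h>

#define MSG_SP    0x1u
#define MSG_STATS 0x2u

int main(void)
{
        unsigned int msg_enable = MSG_SP;               /* only SP enabled */
        unsigned int mask = MSG_SP | MSG_STATS;

        if (msg_enable & mask)                          /* DP(): any bit */
                printf("DP would print\n");
        if ((msg_enable & mask) == mask)                /* DP_AND(): all bits */
                printf("DP_AND would print\n");         /* not reached here */
        return 0;
}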
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9d7419e0390b..5ee13af78e53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1638,36 +1638,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
-
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+ BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
/*
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
- /* how less vectors we will have? */
- int diff = msix_vec - rc;
-
- BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
- if (rc) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
- goto no_msix;
- }
- /*
- * decrease number of queues by number of unallocated entries
- */
- bp->num_ethernet_queues -= diff;
- bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
- BNX2X_DEV_INFO("New queue configuration set: %d\n",
- bp->num_queues);
- } else if (rc > 0) {
+ if (rc == -ENOSPC) {
/* Get by with single vector */
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
- if (rc) {
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+ if (rc < 0) {
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
rc);
goto no_msix;
@@ -1680,8 +1660,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
+ } else if (rc < msix_vec) {
+ /* how many fewer vectors will we have? */
+ int diff = msix_vec - rc;
+
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
+ bp->num_queues);
}
bp->flags |= USING_MSIX_FLAG;
@@ -1873,7 +1867,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1895,7 +1889,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -4773,12 +4767,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4906,3 +4896,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
disable = disable ? 1 : (usec ? 0 : 1);
storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+ u32 verbose)
+{
+ smp_mb__before_clear_bit();
+ set_bit(flag, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+ flag);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index bfc58d488bb5..ec02b15fba32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
@@ -1324,4 +1324,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+ u32 verbose);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fdace204b054..97ea5421dd96 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
* as we are handling an attention on a work queue which must be
* flushed at some rtnl-locked contexts (e.g. if down)
*/
- if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
}
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 84aecdf06f7a..95dc36543548 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -87,7 +87,6 @@
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index cf1df8b62e2c..46e2f18df2cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2848,7 +2848,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_REVISION_VERSION 19
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7d4382286457..84439152e499 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -918,7 +918,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
u16 start = 0, end = 0;
u8 cos;
#endif
- if (disable_int)
+ if (IS_PF(bp) && disable_int)
bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +929,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
- bp->def_idx, bp->def_att_idx, bp->attn_state,
- bp->spq_prod_idx, bp->stats_counter);
- BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
- bp->def_status_blk->atten_status_block.attn_bits,
- bp->def_status_blk->atten_status_block.attn_bits_ack,
- bp->def_status_blk->atten_status_block.status_block_id,
- bp->def_status_blk->atten_status_block.attn_bits_index);
- BNX2X_ERR(" def (");
- for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
- pr_cont("0x%x%s",
- bp->def_status_blk->sp_sb.index_values[i],
- (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
- for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
- *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
- i*sizeof(u32));
-
- pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
- sp_sb_data.igu_sb_id,
- sp_sb_data.igu_seg_id,
- sp_sb_data.p_func.pf_id,
- sp_sb_data.p_func.vnic_id,
- sp_sb_data.p_func.vf_id,
- sp_sb_data.p_func.vf_valid,
- sp_sb_data.state);
+ if (IS_PF(bp)) {
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ int data_size, cstorm_offset;
+
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ def_sb->atten_status_block.attn_bits,
+ def_sb->atten_status_block.attn_bits_ack,
+ def_sb->atten_status_block.status_block_id,
+ def_sb->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ def_sb->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ data_size = sizeof(struct hc_sp_status_block_data) /
+ sizeof(u32);
+ cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+ for (i = 0; i < data_size; i++)
+ *((u32 *)&sp_sb_data + i) =
+ REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+ i * sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+ }
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1021,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
pr_cont("0x%x%s",
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
+
+ /* VF cannot access FW reflection for status block */
+ if (IS_VF(bp))
+ continue;
+
/* fw sb data */
data_size = CHIP_IS_E1x(bp) ?
sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1077,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
#ifdef BNX2X_STOP_ON_ERROR
-
- /* event queue */
- BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
- for (i = 0; i < NUM_EQ_DESC; i++) {
- u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
- BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
- i, bp->eq_ring[i].message.opcode,
- bp->eq_ring[i].message.error);
- BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ if (IS_PF(bp)) {
+ /* event queue */
+ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n",
+ data[0], data[1], data[2]);
+ }
}
/* Rings */
@@ -1140,8 +1155,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
}
#endif
- bnx2x_fw_dump(bp);
- bnx2x_mc_assert(bp);
+ if (IS_PF(bp)) {
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ }
BNX2X_ERR("end crash dump -----------------\n");
}
@@ -1814,6 +1831,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
drv_cmd = BNX2X_Q_CMD_EMPTY;
break;
+ case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+ DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ break;
+
default:
BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
command, fp->index);
@@ -3644,10 +3666,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
- type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
+ /* In some cases (mainly in SRIOV-related use cases), type may
+ * already contain the func-id, so we add it here only if it's
+ * not already set.
+ */
+ if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+ SPE_HDR_CONN_TYPE;
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ } else {
+ type = cmd_type;
+ }
spe->hdr.type = cpu_to_le16(type);
@@ -3878,10 +3908,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
}
static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -5221,9 +5248,9 @@ static void bnx2x_eq_int(struct bnx2x *bp)
continue;
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
- "got statistics comp event %d\n",
- bp->stats_comp++);
+ DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+ "got statistics comp event %d\n",
+ bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -5273,6 +5300,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
} else {
+ int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5311,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* sp_rtnl task as all Queue SP operations
* should run under rtnl_lock.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, cmd, 0);
}
goto next_spqe;
@@ -6005,18 +6029,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- if (IS_MF_SI(bp))
- /*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH
- */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
- else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -12064,11 +12076,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
return;
} else {
/* Schedule an SP task to handle rest of change */
- DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+ NETIF_MSG_IFUP);
}
}
@@ -12101,11 +12110,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
/* configuring mcast to a vf involves sleeping (when we
* wait for the pf's response).
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_VFPF_MCAST, 0);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2ac8e3..31297266b743 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss;
}
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac)
-{
- if (!vlan_mac->get_n_elements) {
- BNX2X_ERR("vlan mac object was not intialized\n");
- return -EINVAL;
- }
- return 0;
-}
-
/********************** Queue state object ***********************************/
/**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
bnx2x_q_fill_setup_data_e2(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_update_data(bp, o, update_params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
o->cids[cid_index], U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
return bnx2x_q_send_update(bp, params);
}
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_tpa_params *params,
+ struct tpa_update_ramrod_data *data)
+{
+ data->client_id = obj->cl_id;
+ data->complete_on_both_clients = params->complete_on_both_clients;
+ data->dont_verify_rings_pause_thr_flg =
+ params->dont_verify_thr;
+ data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+ data->max_sges_for_packet = params->max_sges_pkt;
+ data->max_tpa_queues = params->max_tpa_queues;
+ data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+ data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+ data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+ data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+ data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+ data->tpa_mode = params->tpa_mode;
+ data->update_ipv4 = params->update_ipv4;
+ data->update_ipv6 = params->update_ipv6;
+}
+
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
struct bnx2x_queue_state_params *params)
{
- /* TODO: Not implemented yet. */
- return -1;
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tpa_update_ramrod_data *rdata =
+ (struct tpa_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_tpa_params *update_tpa_params =
+ &params->params.update_tpa;
+ u16 type;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+ /* Add the function id inside the type, so that the sp post
+ * function doesn't automatically add the PF func-id; this is
+ * required for operations done by PFs on behalf of their VFs.
+ */
+ type = ETH_CONNECTION_TYPE |
+ ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), type);
}
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
rdata->tx_switch_suspend = switch_update_params->suspend;
rdata->echo = SWITCH_UPDATE;
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
rdata->allowed_priorities = afex_update_params->allowed_priorities;
rdata->echo = AFEX_UPDATE;
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
DP(BNX2X_MSG_SP,
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f214a40a..80f6c790ed88 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
u8 cid_index;
};
+struct bnx2x_queue_update_tpa_params {
+ dma_addr_t sge_map;
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_pkt;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u8 _pad;
+
+ u16 sge_buff_sz;
+ u16 max_agg_sz;
+
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+};
+
struct rxq_pause_params {
u16 bd_th_lo;
u16 bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
/* Params according to the current command */
union {
struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_update_tpa_params update_tpa;
struct bnx2x_queue_setup_params setup;
struct bnx2x_queue_init_params init;
struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e42f48df6e94..98b53671a652 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -102,6 +102,21 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
mmiowb();
barrier();
}
+
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ bool print_err)
+{
+ if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+ if (print_err)
+ BNX2X_ERR("Slowpath objects not yet initialized!\n");
+ else
+ DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+ return false;
+ }
+ return true;
+}
+
/* VFOP - VF slow-path operation support */
#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
@@ -176,6 +191,11 @@ enum bnx2x_vfop_rss_state {
BNX2X_VFOP_RSS_DONE
};
+enum bnx2x_vfop_tpa_state {
+ BNX2X_VFOP_TPA_CONFIG,
+ BNX2X_VFOP_TPA_DONE
+};
+
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -716,7 +736,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -736,9 +755,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -758,9 +774,12 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ struct bnx2x_vfop *vfop;
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = macs,
@@ -782,9 +801,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -804,9 +820,12 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
struct bnx2x_vfop_cmd *cmd,
int qid, u16 vid, bool add)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ struct bnx2x_vfop *vfop;
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = NULL, /* single command */
@@ -826,9 +845,6 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
ramrod->user_req.u.vlan.vlan = vid;
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -848,7 +864,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -868,9 +883,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -890,9 +902,12 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_vfop_filters *vlans,
int qid, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ struct bnx2x_vfop *vfop;
+
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+ vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = vlans,
@@ -911,9 +926,6 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -971,11 +983,8 @@ op_err:
op_done:
case BNX2X_VFOP_QSETUP_DONE:
vf->cfg_flags |= VF_CFG_VLAN;
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_MSG_IOV);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -1025,34 +1034,20 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1095,8 +1090,13 @@ static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
if (vfop) {
vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
- bnx2x_vfop_qflr, cmd->done);
+ if ((qid == LEADING_IDX) &&
+ bnx2x_validate_vf_sp_objs(bp, vf, false))
+ bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+ bnx2x_vfop_qflr, cmd->done);
+ else
+ bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
+ bnx2x_vfop_qflr, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
cmd->block);
}
@@ -1310,7 +1310,10 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (state) {
case BNX2X_VFOP_QTEARDOWN_RXMODE:
/* Drop all */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true))
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+ else
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
if (vfop->rc)
goto op_err;
@@ -2166,6 +2169,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_sp_map(bp, vf, q_data),
q_type);
+ /* sp indication is set only when vlan/mac/etc. are initialized */
+ q->sp_initialized = false;
+
DP(BNX2X_MSG_IOV,
"initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2527,10 +2533,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
(is_fcoe ? 0 : 1);
- DP(BNX2X_MSG_IOV,
- "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
- BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
- first_queue_query_index + num_queues_req);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +2550,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
struct bnx2x_virtf *vf = BP_VF(bp, i);
if (vf->state != VF_ENABLED) {
- DP(BNX2X_MSG_IOV,
- "vf %d not enabled so no stats for it\n",
- vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
continue;
}
@@ -2597,7 +2603,8 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
/* Iterate over all VFs and invoke state transition for VFs with
* 'in-progress' slow-path operations
*/
- DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
+ "searching for pending vf operations\n");
for_each_vf(bp, i) {
struct bnx2x_virtf *vf = BP_VF(bp, i);
@@ -3046,6 +3053,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
return -ENOMEM;
}
+/* VFOP tpa update, send update on all queues */
+static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
+ enum bnx2x_vfop_tpa_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
+ vf->abs_vfid, tpa_args->qid,
+ state);
+
+ switch (state) {
+ case BNX2X_VFOP_TPA_CONFIG:
+
+ if (tpa_args->qid < vf_rxq_count(vf)) {
+ struct bnx2x_queue_state_params *qstate =
+ &vf->op_params.qstate;
+
+ qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
+
+ /* The only thing that changes for the ramrod params
+ * between calls is the sge_map
+ */
+ qstate->params.update_tpa.sge_map =
+ tpa_args->sge_map[tpa_args->qid];
+
+ DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
+ tpa_args->qid,
+ U64_HI(qstate->params.update_tpa.sge_map),
+ U64_LO(qstate->params.update_tpa.sge_map));
+ qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ vfop->rc = bnx2x_queue_state_change(bp, qstate);
+
+ tpa_args->qid++;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
+ vfop->state = BNX2X_VFOP_TPA_DONE;
+ vfop->rc = 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_TPA_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = 0; /* loop */
+ memcpy(&vfop->args.tpa.sge_map,
+ tpa_tlv->tpa_client_info.sge_addr,
+ sizeof(vfop->args.tpa.sge_map));
+ bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
+ bnx2x_vfop_tpa, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
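For readers new to the vfop state machines, the following is a minimal stand-alone sketch (plain user-space C, not driver code; all names are illustrative) of the per-queue pattern bnx2x_vfop_tpa() follows: the next queue index lives in the op arguments, one update is issued per pass, and the operation is re-entered, synchronously or on ramrod completion, until every RX queue has been updated.

#include <stdio.h>

#define NUM_VF_RXQ 4

struct tpa_op {
	int qid;                               /* next queue to update */
	unsigned long long sge_map[NUM_VF_RXQ];
};

/* stand-in for issuing BNX2X_Q_CMD_UPDATE_TPA for one queue */
static int issue_update_tpa(int qid, unsigned long long sge_map)
{
	printf("queue %d: update TPA, sge_map=0x%llx\n", qid, sge_map);
	return 0;
}

/* one state-machine pass; returns 1 while more passes are needed */
static int tpa_op_step(struct tpa_op *op)
{
	if (op->qid < NUM_VF_RXQ) {
		issue_update_tpa(op->qid, op->sge_map[op->qid]);
		op->qid++;             /* the next pass handles the next queue */
		return 1;
	}
	return 0;                      /* all queues updated -> done */
}

int main(void)
{
	struct tpa_op op = { .qid = 0,
			     .sge_map = { 0x1000, 0x2000, 0x3000, 0x4000 } };

	while (tpa_op_step(&op))
		;                      /* in the driver, re-entry may be async */
	return 0;
}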
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
@@ -3074,16 +3158,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
*sbdf = vf->devfn | (vf->bus << 8);
}
-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vf_bar_info *bar_info)
-{
- int n;
-
- bar_info->nr_bars = bp->vfdb->sriov.nres;
- for (n = 0; n < bar_info->nr_bars; n++)
- bar_info->bars[n] = vf->bars[n];
-}
-
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
@@ -3405,13 +3479,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
0, ETH_ALEN);
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
vlan_obj->get_n_elements(bp, vlan_obj, 1,
(u8 *)&ivi->vlan, 0,
VLAN_HLEN);
+ }
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +3559,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj =
- &bnx2x_leading_vfq(vf, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see the failure reason in the system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* remove existing eth macs */
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +3643,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
BNX2X_Q_LOGICAL_STATE_ACTIVE)
return rc;
- /* configure the vlan in device on this vf's queue */
- vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see the error in the system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
/* remove existing vlans */
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc) {
@@ -3736,13 +3809,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
bnx2x_sample_bulletin(bp);
/* if channel is down we need to self destruct */
- if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_MSG_IOV);
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1b5a9d..b1dc751c6175 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -83,6 +83,7 @@ struct bnx2x_vf_queue {
u16 index;
u16 sb_idx;
bool is_leading;
+ bool sp_initialized;
};
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -100,6 +101,7 @@ union bnx2x_vfop_params {
struct bnx2x_mcast_ramrod_params mcast;
struct bnx2x_config_rss_params rss;
struct bnx2x_vfop_qctor_params qctor;
+ struct bnx2x_queue_state_params qstate;
};
/* forward */
@@ -166,6 +168,11 @@ struct bnx2x_vfop_args_filters {
atomic_t *credit; /* non NULL means 'don't consume credit' */
};
+struct bnx2x_vfop_args_tpa {
+ int qid;
+ dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
+};
+
union bnx2x_vfop_args {
struct bnx2x_vfop_args_mcast mc_list;
struct bnx2x_vfop_args_qctor qctor;
@@ -173,6 +180,7 @@ union bnx2x_vfop_args {
struct bnx2x_vfop_args_defvlan defvlan;
struct bnx2x_vfop_args_qx qx;
struct bnx2x_vfop_args_filters filters;
+ struct bnx2x_vfop_args_tpa tpa;
};
struct bnx2x_vfop {
@@ -704,6 +712,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct vfpf_tpa_tlv *tpa_tlv);
+
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 3fa6c2a2a5a9..1117ed7776b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->leading_rss = cl_id;
q->is_leading = true;
+ q->sp_initialized = true;
}
/* ask the pf to open a queue for the vf */
@@ -1159,7 +1160,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
- /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ PFVF_CAP_TPA |
+ PFVF_CAP_TPA_UPDATE);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -1694,16 +1696,12 @@ static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
return -ENOMEM;
}
-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
{
- struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc = 0;
/* if a mac was already set for this VF via the set vf mac ndo, we only
* accept mac configurations of that mac. Why accept them at all?
@@ -1716,6 +1714,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
@@ -1726,9 +1725,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
vf->abs_vfid);
vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
+
+response:
+ return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
/* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
int i;
@@ -1740,13 +1752,36 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
}
/* verify vf_qid */
- if (filters->vf_qid > vf_rxq_count(vf))
+ if (filters->vf_qid > vf_rxq_count(vf)) {
+ rc = -EPERM;
+ goto response;
+ }
+
+response:
+ return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ if (bnx2x_filters_validate_mac(bp, vf, filters))
+ goto response;
+
+ if (bnx2x_filters_validate_vlan(bp, vf, filters))
goto response;
DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1877,6 +1912,75 @@ mbx_resp:
bnx2x_vf_mbx_resp(bp, vf);
}
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ int rc = 0;
+
+ if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+ U_ETH_MAX_SGES_FOR_PACKET) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_sges_for_packet,
+ U_ETH_MAX_SGES_FOR_PACKET);
+ }
+
+ if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_tpa_queues,
+ MAX_AGG_QS(bp));
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+ struct bnx2x_queue_update_tpa_params *vf_op_params =
+ &vf->op_params.qstate.params.update_tpa;
+ struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+
+ memset(vf_op_params, 0, sizeof(*vf_op_params));
+
+ if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+ goto mbx_resp;
+
+ vf_op_params->complete_on_both_clients =
+ tpa_tlv->tpa_client_info.complete_on_both_clients;
+ vf_op_params->dont_verify_thr =
+ tpa_tlv->tpa_client_info.dont_verify_thr;
+ vf_op_params->max_agg_sz =
+ tpa_tlv->tpa_client_info.max_agg_size;
+ vf_op_params->max_sges_pkt =
+ tpa_tlv->tpa_client_info.max_sges_for_packet;
+ vf_op_params->max_tpa_queues =
+ tpa_tlv->tpa_client_info.max_tpa_queues;
+ vf_op_params->sge_buff_sz =
+ tpa_tlv->tpa_client_info.sge_buff_size;
+ vf_op_params->sge_pause_thr_high =
+ tpa_tlv->tpa_client_info.sge_pause_thr_high;
+ vf_op_params->sge_pause_thr_low =
+ tpa_tlv->tpa_client_info.sge_pause_thr_low;
+ vf_op_params->tpa_mode =
+ tpa_tlv->tpa_client_info.tpa_mode;
+ vf_op_params->update_ipv4 =
+ tpa_tlv->tpa_client_info.update_ipv4;
+ vf_op_params->update_ipv6 =
+ tpa_tlv->tpa_client_info.update_ipv6;
+
+ vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
+
+mbx_resp:
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
@@ -1916,6 +2020,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
case CHANNEL_TLV_UPDATE_RSS:
bnx2x_vf_mbx_update_rss(bp, vf, mbx);
return;
+ case CHANNEL_TLV_UPDATE_TPA:
+ bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+ return;
}
} else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568bc7a71..c922b81170e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_RSS 0x00000001
#define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
u32 rx_mask; /* see mask constants at the top of the file */
};
+struct vfpf_tpa_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_tpa_client_info {
+ aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u16 sge_buff_size;
+ u16 max_agg_size;
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+ } tpa_client_info;
+};
+
/* close VF (disable VF) */
struct vfpf_close_tlv {
struct vfpf_first_tlv first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
struct vfpf_set_q_filters_tlv set_q_filters;
struct vfpf_release_tlv release;
struct vfpf_rss_tlv update_rss;
+ struct vfpf_tpa_tlv update_tpa;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
@@ -405,6 +426,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID,
+ CHANNEL_TLV_UPDATE_TPA,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644
index 000000000000..31f55a90a197
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMGENET) += genet.o
+genet-objs := bcmgenet.o bcmmii.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 000000000000..0ebc29769510
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,2595 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) controller driver
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "bcmgenet: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include <asm/unaligned.h>
+
+#include "bcmgenet.h"
+
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
+/* Default highest priority queue for multi queue support */
+#define GENET_Q0_PRIORITY 0
+
+#define GENET_DEFAULT_BD_CNT \
+ (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+
+#define RX_BUF_LENGTH 2048
+#define SKB_ALIGNMENT 32
+
+/* Tx/Rx DMA register offset, skip 256 descriptors */
+#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
+#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
+
+#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d, u32 value)
+{
+ __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+ __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
+
+ /* Register writes to the GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, so save these expensive writes unless
+ * the platform is explicitly configured for 64-bit/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
+#endif
+}
+
+/* Combined address + length/status setter */
+static inline void dmadesc_set(struct bcmgenet_priv *priv,
+ void __iomem *d, dma_addr_t addr, u32 val)
+{
+ dmadesc_set_length_status(priv, d, val);
+ dmadesc_set_addr(priv, d, addr);
+}
+
+static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ dma_addr_t addr;
+
+ addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
+
+ /* Register writes to the GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, so save these expensive writes unless
+ * the platform is explicitly configured for 64-bit/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
+#endif
+ return addr;
+}
+
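As a quick stand-alone illustration (plain C, with the kernel helpers replaced by shifts), this is how a 40-bit DMA address is split across the LO and HI descriptor words by dmadesc_set_addr() and recombined by dmadesc_get_addr():

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t addr = 0x12ABCD1234ULL;        /* example 40-bit address */
	uint32_t lo = (uint32_t)addr;           /* lower_32_bits() */
	uint32_t hi = (uint32_t)(addr >> 32);   /* upper_32_bits() */
	uint64_t back = (uint64_t)hi << 32 | lo;

	printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 " back=0x%010" PRIx64 "\n",
	       lo, hi, back);
	return 0;
}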
+#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
+
+#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
+ else
+ return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
+}
+
+static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
+ else
+ bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
+}
+
+/* These accessors deal with the register map change between GENET 1.1
+ * and GENET 2. Only those currently used by the driver are defined.
+ */
+static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+/* RX/TX DMA register accessors */
+enum dma_reg {
+ DMA_RING_CFG = 0,
+ DMA_CTRL,
+ DMA_STATUS,
+ DMA_SCB_BURST_SIZE,
+ DMA_ARB_CTRL,
+ DMA_PRIORITY,
+ DMA_RING_PRIORITY,
+};
+
+static const u8 bcmgenet_dma_regs_v3plus[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x2C,
+ [DMA_PRIORITY] = 0x30,
+ [DMA_RING_PRIORITY] = 0x38,
+};
+
+static const u8 bcmgenet_dma_regs_v2[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+static const u8 bcmgenet_dma_regs_v1[] = {
+ [DMA_CTRL] = 0x00,
+ [DMA_STATUS] = 0x04,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+/* Set at runtime once bcmgenet version is known */
+static const u8 *bcmgenet_dma_regs;
+
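A minimal stand-alone sketch of the version-keyed table indirection used above; the version check and selection point are assumptions for illustration, only the 0x2C/0x30 DMA_ARB_CTRL offsets come from the tables themselves:

#include <stdint.h>
#include <stdio.h>

enum demo_dma_reg { DEMO_DMA_CTRL, DEMO_DMA_ARB_CTRL };

static const uint8_t demo_regs_v3plus[] = {
	[DEMO_DMA_CTRL]     = 0x04,
	[DEMO_DMA_ARB_CTRL] = 0x2C,
};

static const uint8_t demo_regs_v2[] = {
	[DEMO_DMA_CTRL]     = 0x04,
	[DEMO_DMA_ARB_CTRL] = 0x30,
};

static const uint8_t *demo_dma_regs;	/* chosen once the version is known */

int main(void)
{
	int genet_version = 4;		/* assumed: discovered at probe time */

	demo_dma_regs = (genet_version >= 3) ? demo_regs_v3plus : demo_regs_v2;
	printf("DMA_ARB_CTRL offset: 0x%02x\n",
	       demo_dma_regs[DEMO_DMA_ARB_CTRL]);
	return 0;
}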
+static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
+{
+ return netdev_priv(dev_get_drvdata(dev));
+}
+
+static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+/* RDMA/TDMA ring registers and accessors
+ * we merge the common fields and just prefix with T/D the registers
+ * having different meaning depending on the direction
+ */
+enum dma_ring_reg {
+ TDMA_READ_PTR = 0,
+ RDMA_WRITE_PTR = TDMA_READ_PTR,
+ TDMA_READ_PTR_HI,
+ RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
+ TDMA_CONS_INDEX,
+ RDMA_PROD_INDEX = TDMA_CONS_INDEX,
+ TDMA_PROD_INDEX,
+ RDMA_CONS_INDEX = TDMA_PROD_INDEX,
+ DMA_RING_BUF_SIZE,
+ DMA_START_ADDR,
+ DMA_START_ADDR_HI,
+ DMA_END_ADDR,
+ DMA_END_ADDR_HI,
+ DMA_MBUF_DONE_THRESH,
+ TDMA_FLOW_PERIOD,
+ RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
+ TDMA_WRITE_PTR,
+ RDMA_READ_PTR = TDMA_WRITE_PTR,
+ TDMA_WRITE_PTR_HI,
+ RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
+};
+
+/* GENET v4 supports 40-bit pointer addressing.
+ * For obvious reasons the LO and HI word parts
+ * are contiguous, but this offsets the other
+ * registers.
+ */
+static const u8 genet_dma_ring_regs_v4[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_READ_PTR_HI] = 0x04,
+ [TDMA_CONS_INDEX] = 0x08,
+ [TDMA_PROD_INDEX] = 0x0C,
+ [DMA_RING_BUF_SIZE] = 0x10,
+ [DMA_START_ADDR] = 0x14,
+ [DMA_START_ADDR_HI] = 0x18,
+ [DMA_END_ADDR] = 0x1C,
+ [DMA_END_ADDR_HI] = 0x20,
+ [DMA_MBUF_DONE_THRESH] = 0x24,
+ [TDMA_FLOW_PERIOD] = 0x28,
+ [TDMA_WRITE_PTR] = 0x2C,
+ [TDMA_WRITE_PTR_HI] = 0x30,
+};
+
+static const u8 genet_dma_ring_regs_v123[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_CONS_INDEX] = 0x04,
+ [TDMA_PROD_INDEX] = 0x08,
+ [DMA_RING_BUF_SIZE] = 0x0C,
+ [DMA_START_ADDR] = 0x10,
+ [DMA_END_ADDR] = 0x14,
+ [DMA_MBUF_DONE_THRESH] = 0x18,
+ [TDMA_FLOW_PERIOD] = 0x1C,
+ [TDMA_WRITE_PTR] = 0x20,
+};
+
+/* Set at runtime once GENET version is known */
+static const u8 *genet_dma_ring_regs;
+
+static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static int bcmgenet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 rbuf_chk_ctrl;
+ bool rx_csum_en;
+
+ rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+
+ rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+
+ /* enable rx checksumming */
+ if (rx_csum_en)
+ rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+ else
+ rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
+ priv->desc_rxchk_en = rx_csum_en;
+ bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ bool desc_64b_en;
+ u32 tbuf_ctrl, rbuf_ctrl;
+
+ tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
+ rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+
+ desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+
+ /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
+ if (desc_64b_en) {
+ tbuf_ctrl |= RBUF_64B_EN;
+ rbuf_ctrl |= RBUF_64B_EN;
+ } else {
+ tbuf_ctrl &= ~RBUF_64B_EN;
+ rbuf_ctrl &= ~RBUF_64B_EN;
+ }
+ priv->desc_64b_en = desc_64b_en;
+
+ bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
+ bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcmgenet_set_tx_csum(dev, wanted);
+ if (changed & (NETIF_F_RXCSUM))
+ ret = bcmgenet_set_rx_csum(dev, wanted);
+
+ return ret;
+}
+
+static u32 bcmgenet_get_msglevel(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = level;
+}
+
+/* standard ethtool support functions. */
+enum bcmgenet_stat_type {
+ BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_MIB_RX,
+ BCMGENET_STAT_MIB_TX,
+ BCMGENET_STAT_RUNT,
+ BCMGENET_STAT_MISC,
+};
+
+struct bcmgenet_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcmgenet_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
+
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCMGENET_STAT_NETDEV, \
+}
+
+#define STAT_GENET_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+
+#define STAT_GENET_MISC(str, m, offset) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = BCMGENET_STAT_MISC, \
+ .reg_offset = offset, \
+}
+
+
+/* There is a 0xC gap between the end of the RX stats and the beginning of the
+ * TX stats, and another between the end of the TX stats and the beginning of
+ * the RX RUNT counters.
+ */
+#define BCMGENET_STAT_OFFSET 0xc
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
+ STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
+ STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
+ STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
+ STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* Misc UniMAC counters */
+ STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+ UMAC_RBUF_OVFL_CNT),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+};
+
+#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+
+static void bcmgenet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strlcpy(info->version, "v2.0", sizeof(info->version));
+ info->n_stats = BCMGENET_STATS_LEN;
+}
+
+static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCMGENET_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcmgenet_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcmgenet_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ switch (s->type) {
+ case BCMGENET_STAT_NETDEV:
+ continue;
+ case BCMGENET_STAT_MIB_RX:
+ case BCMGENET_STAT_MIB_TX:
+ case BCMGENET_STAT_RUNT:
+ if (s->type != BCMGENET_STAT_MIB_RX)
+ offset = BCMGENET_STAT_OFFSET;
+ val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
+ j + offset);
+ break;
+ case BCMGENET_STAT_MISC:
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+}
+
+static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcmgenet_update_mib_counters(priv);
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ if (s->type == BCMGENET_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
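bcmgenet_update_mib_counters() and bcmgenet_get_ethtool_stats() both rely on the classic offsetof/byte-pointer idiom to address counters generically; a stand-alone illustration of that idiom:

#include <stddef.h>
#include <stdio.h>

struct demo_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

struct demo_stat_desc {
	const char *name;
	size_t offset;		/* where the counter lives in the structure */
};

static const struct demo_stat_desc descs[] = {
	{ "rx_packets", offsetof(struct demo_stats, rx_packets) },
	{ "tx_packets", offsetof(struct demo_stats, tx_packets) },
};

int main(void)
{
	struct demo_stats s = { .rx_packets = 7, .tx_packets = 3 };
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		/* add the recorded offset to the structure's base address */
		const char *p = (const char *)&s + descs[i].offset;

		printf("%s = %lu\n", descs[i].name,
		       *(const unsigned long *)p);
	}
	return 0;
}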
+/* standard ethtool support functions. */
+static struct ethtool_ops bcmgenet_ethtool_ops = {
+ .get_strings = bcmgenet_get_strings,
+ .get_sset_count = bcmgenet_get_sset_count,
+ .get_ethtool_stats = bcmgenet_get_ethtool_stats,
+ .get_settings = bcmgenet_get_settings,
+ .set_settings = bcmgenet_set_settings,
+ .get_drvinfo = bcmgenet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = bcmgenet_get_msglevel,
+ .set_msglevel = bcmgenet_set_msglevel,
+};
+
+/* Power down the unimac, based on mode. */
+static void bcmgenet_power_down(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ switch (mode) {
+ case GENET_POWER_CABLE_SENSE:
+ if (priv->phydev)
+ phy_detach(priv->phydev);
+ break;
+
+ case GENET_POWER_PASSIVE:
+ /* Power down LED */
+ bcmgenet_mii_reset(priv->dev);
+ if (priv->hw_params->flags & GENET_HAS_EXT) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= (EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ if (!(priv->hw_params->flags & GENET_HAS_EXT))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+
+ switch (mode) {
+ case GENET_POWER_PASSIVE:
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_BIAS);
+ /* fallthrough */
+ case GENET_POWER_CABLE_SENSE:
+ /* enable APD */
+ reg |= EXT_PWR_DN_EN_LD;
+ break;
+ default:
+ break;
+ }
+
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(priv->dev);
+}
+
+/* ioctl handles special commands that are not present in ethtool. */
+static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int val = 0;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ val = -ENODEV;
+ else
+ val = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+
+ default:
+ val = -EINVAL;
+ break;
+ }
+
+ return val;
+}
+
+static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct enet_cb *tx_cb_ptr;
+
+ tx_cb_ptr = ring->cbs;
+ tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+ tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+ /* Advancing local write pointer */
+ if (ring->write_ptr == ring->end_ptr)
+ ring->write_ptr = ring->cb_ptr;
+ else
+ ring->write_ptr++;
+
+ return tx_cb_ptr;
+}
+
+/* Simple helper to free a control block's resources */
+static void bcmgenet_free_cb(struct enet_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+ priv->int1_mask &= ~(1 << ring->index);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_SET);
+ priv->int1_mask |= (1 << ring->index);
+}
+
+/* Unlocked version of the reclaim routine */
+static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int last_tx_cn, last_c_index, num_tx_bds;
+ struct enet_cb *tx_cb_ptr;
+ unsigned int c_index;
+
+ /* Compute how many buffers have been transmitted since the last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+
+ last_c_index = ring->c_index;
+ num_tx_bds = ring->size;
+
+ c_index &= (num_tx_bds - 1);
+
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_bds - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, dev,
+ "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+ __func__, ring->index,
+ c_index, last_tx_cn, last_c_index);
+
+ /* Reclaim transmitted buffers */
+ while (last_tx_cn-- > 0) {
+ tx_cb_ptr = ring->cbs + last_c_index;
+ if (tx_cb_ptr->skb) {
+ dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ dma_unmap_single(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ tx_cb_ptr->skb->len,
+ DMA_TO_DEVICE);
+ bcmgenet_free_cb(tx_cb_ptr);
+ } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+ dev->stats.tx_bytes +=
+ dma_unmap_len(tx_cb_ptr, dma_len);
+ dma_unmap_page(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ dma_unmap_len(tx_cb_ptr, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+ }
+ dev->stats.tx_packets++;
+ ring->free_bds += 1;
+
+ last_c_index++;
+ last_c_index &= (num_tx_bds - 1);
+ }
+
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+ ring->int_disable(priv, ring);
+
+ if (__netif_subqueue_stopped(dev, ring->queue))
+ netif_wake_subqueue(dev, ring->queue);
+
+ ring->c_index = c_index;
+}
+
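The consumer-index arithmetic in __bcmgenet_tx_reclaim() (mirrored by the producer-index arithmetic in the RX path further down) reduces to a wrap-aware difference over a power-of-two ring; a small stand-alone check:

#include <stdio.h>

/* num_bds must be a power of two for the masking below to be valid */
static unsigned int completed(unsigned int c_index, unsigned int last_c_index,
			      unsigned int num_bds)
{
	c_index &= (num_bds - 1);
	if (c_index >= last_c_index)
		return c_index - last_c_index;
	return num_bds - last_c_index + c_index;	/* index wrapped */
}

int main(void)
{
	/* e.g. a ring of 256: hardware index wrapped from 250 past 0 to 4 */
	printf("%u\n", completed(4, 250, 256));		/* prints 10 */
	return 0;
}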
+static void bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcmgenet_tx_reclaim(dev, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_is_multiqueue(dev)) {
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
+ }
+
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+}
+
+/* Transmits a single SKB (either the head of a fragment list or a
+ * standalone SKB); the caller must hold priv->lock.
+ */
+static int bcmgenet_xmit_single(struct net_device *dev,
+ struct sk_buff *skb,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ unsigned int skb_len;
+ dma_addr_t mapping;
+ u32 length_status;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+
+ tx_cb_ptr->skb = skb;
+
+ skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ DMA_TX_APPEND_CRC;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ length_status |= DMA_TX_DO_CSUM;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
+
+ /* Decrement total BD count and advance our write pointer */
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Transmit an SKB fragment */
+static int bcmgenet_xmit_frag(struct net_device *dev,
+ skb_frag_t *frag,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ dma_addr_t mapping;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+ tx_cb_ptr->skb = NULL;
+
+ mapping = skb_frag_dma_map(kdev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
+ __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
+ (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Reallocate the SKB to put enough headroom in front of it and insert
+ * the transmit checksum offsets in the descriptors
+ */
+static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
+{
+ struct status_64 *status = NULL;
+ struct sk_buff *new_skb;
+ u16 offset;
+ u8 ip_proto;
+ u16 ip_ver;
+ u32 tx_csum_info;
+
+ if (unlikely(skb_headroom(skb) < sizeof(*status))) {
+ /* If the 64-byte status block is enabled, make sure the skb has
+ * enough headroom for us to insert it.
+ */
+ new_skb = skb_realloc_headroom(skb, sizeof(*status));
+ dev_kfree_skb(skb);
+ if (!new_skb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ skb = new_skb;
+ }
+
+ skb_push(skb, sizeof(*status));
+ status = (struct status_64 *)skb->data;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ip_ver = htons(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return 0;
+ }
+
+ offset = skb_checksum_start_offset(skb) - sizeof(*status);
+ tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
+ (offset + skb->csum_offset);
+
+ /* Set the length-valid bit for TCP and UDP, plus the special
+ * UDP flag for UDP over IPv4; for other protocols clear tx_csum_info.
+ */
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ tx_csum_info |= STATUS_TX_CSUM_LV;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
+ } else
+ tx_csum_info = 0;
+
+ status->tx_csum_info = tx_csum_info;
+ }
+
+ return 0;
+}
+
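The tx_csum_info word built above packs two offsets: where checksumming starts and where the result is stored. A stand-alone illustration of that packing, with the shift constant defined locally as a stand-in for STATUS_TX_CSUM_START_SHIFT from bcmgenet.h and the offsets chosen arbitrarily:

#include <stdint.h>
#include <stdio.h>

#define CSUM_START_SHIFT 16	/* local stand-in, not the driver's constant */

int main(void)
{
	unsigned int csum_start  = 98;	/* example checksum start offset */
	unsigned int csum_offset = 16;	/* example offset of the checksum field */
	uint32_t tx_csum_info = (csum_start << CSUM_START_SHIFT) |
				(csum_start + csum_offset);

	/* upper half: where to start, lower half: where to store the result */
	printf("tx_csum_info = 0x%08x\n", tx_csum_info);
	return 0;
}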
+static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_tx_ring *ring = NULL;
+ unsigned long flags = 0;
+ int nr_frags, index;
+ u16 dma_desc_flags;
+ int ret;
+ int i;
+
+ index = skb_get_queue_mapping(skb);
+ /* Mapping strategy:
+ * queue_mapping = 0, unclassified, packet transmitted through ring 16
+ * queue_mapping = 1, goes to ring 0 (highest priority queue).
+ * queue_mapping = 2, goes to ring 1.
+ * queue_mapping = 3, goes to ring 2.
+ * queue_mapping = 4, goes to ring 3.
+ */
+ if (index == 0)
+ index = DESC_INDEX;
+ else
+ index -= 1;
+
+ if ((index != DESC_INDEX) && (index > priv->hw_params->tx_queues - 1)) {
+ netdev_err(dev, "%s: queue_mapping %d is invalid\n",
+ __func__, skb_get_queue_mapping(skb));
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ ring = &priv->tx_rings[index];
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->free_bds <= nr_frags + 1) {
+ netif_stop_subqueue(dev, ring->queue);
+ netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* reclaim transmitted skbs every 8 packets */
+ /*if (ring->free_bds < ring->size - 8)*/
+ /*__bcmgenet_tx_reclaim(dev, ring);*/
+
+ /* set the SKB transmit checksum */
+ if (priv->desc_64b_en) {
+ ret = bcmgenet_put_tx_csum(dev, skb);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ dma_desc_flags = DMA_SOP;
+ if (nr_frags == 0)
+ dma_desc_flags |= DMA_EOP;
+
+ /* Transmit single SKB or head of fragment list */
+ ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* xmit fragment */
+ for (i = 0; i < nr_frags; i++) {
+ ret = bcmgenet_xmit_frag(dev,
+ &skb_shinfo(skb)->frags[i],
+ (i == nr_frags - 1) ? DMA_EOP : 0, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ /* We kept a software copy of how much we should advance the TDMA
+ * producer index; now write it to the hardware.
+ */
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ netif_stop_subqueue(dev, ring->queue);
+ ring->int_enable(priv, ring);
+ }
+
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
+
+
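The queue-to-ring mapping spelled out in the bcmgenet_xmit() comment above condenses to a one-liner; a stand-alone check, with DESC_INDEX defined locally to match the "ring 16" default that comment mentions:

#include <stdio.h>

#define DESC_INDEX 16	/* local stand-in for the default/unclassified ring */

static int queue_mapping_to_ring(int queue_mapping)
{
	return queue_mapping == 0 ? DESC_INDEX : queue_mapping - 1;
}

int main(void)
{
	int q;

	for (q = 0; q <= 4; q++)
		printf("queue_mapping %d -> ring %d\n",
		       q, queue_mapping_to_ring(q));
	return 0;
}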
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+ struct enet_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret;
+
+ skb = netdev_alloc_skb(priv->dev,
+ priv->rx_buf_len + SKB_ALIGNMENT);
+ if (!skb)
+ return -ENOMEM;
+
+ /* a caller did not release this control block */
+ WARN_ON(cb->skb != NULL);
+ cb->skb = skb;
+ mapping = dma_map_single(kdev, skb->data,
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcmgenet_free_cb(cb);
+ netif_err(priv, rx_err, priv->dev,
+ "%s DMA map failed\n", __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ /* assign packet, prepare descriptor, and advance pointer */
+
+ dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+ /* turn on the newly assigned BD for DMA to use */
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DMA_DESC_SIZE);
+
+ return 0;
+}
+
+/* bcmgenet_desc_rx - descriptor-based rx processing.
+ * This can be called from a bottom half or from the NAPI polling method.
+ */
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+ unsigned int budget)
+{
+ struct net_device *dev = priv->dev;
+ struct enet_cb *cb;
+ struct sk_buff *skb;
+ u32 dma_length_status;
+ unsigned long dma_flag;
+ int len, err;
+ unsigned int rxpktprocessed = 0, rxpkttoprocess;
+ unsigned int p_index;
+ unsigned int chksum_ok = 0;
+
+ p_index = bcmgenet_rdma_ring_readl(priv,
+ DESC_INDEX, RDMA_PROD_INDEX);
+ p_index &= DMA_P_INDEX_MASK;
+
+ if (p_index < priv->rx_c_index)
+ rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ rxpkttoprocess = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, dev,
+ "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+
+ while ((rxpktprocessed < rxpkttoprocess) &&
+ (rxpktprocessed < budget)) {
+
+ /* Unmap the packet contents such that we can use the
+ * RSV from the 64-byte descriptor when enabled and save
+ * a 32-bit register read.
+ */
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+
+ if (!priv->desc_64b_en) {
+ dma_length_status = dmadesc_get_length_status(priv,
+ priv->rx_bds +
+ (priv->rx_read_ptr *
+ DMA_DESC_SIZE));
+ } else {
+ struct status_64 *status;
+ status = (struct status_64 *)skb->data;
+ dma_length_status = status->length_status;
+ }
+
+ /* DMA flags and length are still valid no matter how
+ * we got the Receive Status Vector (64B RSB or register)
+ */
+ dma_flag = dma_length_status & 0xffff;
+ len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
+
+ netif_dbg(priv, rx_status, dev,
+ "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+ __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
+ dma_length_status);
+
+ rxpktprocessed++;
+
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+ /* out of memory, just drop packets at the hardware level */
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+ netif_err(priv, rx_status, dev,
+ "Droping fragmented packet!\n");
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ }
+ /* report errors */
+ if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER))) {
+ netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
+ (unsigned int)dma_flag);
+ if (dma_flag & DMA_RX_CRC_ERROR)
+ dev->stats.rx_crc_errors++;
+ if (dma_flag & DMA_RX_OV)
+ dev->stats.rx_over_errors++;
+ if (dma_flag & DMA_RX_NO)
+ dev->stats.rx_frame_errors++;
+ if (dma_flag & DMA_RX_LG)
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+
+ /* discard the packet and advance consumer index.*/
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ } /* error packet */
+
+ chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
+ priv->desc_rxchk_en;
+
+ skb_put(skb, len);
+ if (priv->desc_64b_en) {
+ skb_pull(skb, 64);
+ len -= 64;
+ }
+
+ if (likely(chksum_ok))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* remove the 2 bytes the hardware added for IP alignment */
+ skb_pull(skb, 2);
+ len -= 2;
+
+ if (priv->crc_fwd_en) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ /* Finish setting up the received SKB and send it to the kernel */
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ if (dma_flag & DMA_RX_MULT)
+ dev->stats.multicast++;
+
+ /* Notify kernel */
+ napi_gro_receive(&priv->napi, skb);
+ cb->skb = NULL;
+ netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
+
+ /* refill RX path on the current control block */
+refill:
+ err = bcmgenet_rx_refill(priv, cb);
+ if (err)
+ netif_err(priv, rx_err, dev, "Rx refill failed\n");
+ }
+
+ return rxpktprocessed;
+}
+
+/* Assign skb to RX DMA descriptor. */
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int ret = 0;
+ int i;
+
+ netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+
+ /* loop here for each buffer needing assign */
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ /* Set the DMA descriptor length once and for all;
+ * it would only change if we supported dynamically sizing
+ * priv->rx_buf_len, which we do not.
+ */
+ dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
+ priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
+
+ ret = bcmgenet_rx_refill(priv, cb);
+ if (ret)
+ break;
+
+ }
+
+ return ret;
+}
+
+static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+
+ if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_single(&priv->dev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ if (cb->skb)
+ bcmgenet_free_cb(cb);
+ }
+}
+
+static int reset_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int timeout = 0;
+ u32 reg;
+
+ /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
+ bcmgenet_rbuf_ctrl_set(priv, 0);
+ udelay(10);
+
+ /* disable MAC while updating its registers */
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+
+ /* issue soft reset, wait for it to complete */
+ bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ return 0;
+
+ udelay(1);
+ }
+
+ /* the loop above post-increments, so timeout is 1001 on expiry */
+ if (timeout >= 1000) {
+ dev_err(kdev,
+ "timeout waiting for MAC to come out of reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int init_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ int ret;
+ u32 reg, cpu_mask_clear;
+
+ dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+
+ ret = reset_umac(priv);
+ if (ret)
+ return ret;
+
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+ /* clear tx/rx counter */
+ bcmgenet_umac_writel(priv,
+ MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+ bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
+
+ bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* init rx registers, enable ip header optimization */
+ reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+ reg |= RBUF_ALIGN_2B;
+ bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
+ bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
+
+ /* Mask all interrupts.*/
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+
+ dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+
+	/* Monitor cable plug/unplug events for the internal PHY */
+ if (phy_is_internal(priv->phydev))
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->ext_phy)
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ reg = bcmgenet_bp_mc_get(priv);
+ reg |= BIT(priv->hw_params->bp_in_en_shift);
+
+ /* bp_mask: back pressure mask */
+ if (netif_is_multiqueue(priv->dev))
+ reg |= priv->hw_params->bp_in_mask;
+ else
+ reg &= ~priv->hw_params->bp_in_mask;
+ bcmgenet_bp_mc_set(priv, reg);
+ }
+
+ /* Enable MDIO interrupts on GENET v3+ */
+ if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
+ cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+
+ bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
+ INTRL2_CPU_MASK_CLEAR);
+
+	/* The RX/TX engines are enabled later, from bcmgenet_open() */
+ dev_dbg(kdev, "done init umac\n");
+
+ return 0;
+}
+
+/* Initialize all house-keeping variables for a TX ring, along
+ * with corresponding hardware registers
+ */
+static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size,
+ unsigned int write_ptr, unsigned int end_ptr)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ u32 flow_period_val = 0;
+ unsigned int first_bd;
+
+ spin_lock_init(&ring->lock);
+ ring->index = index;
+ if (index == DESC_INDEX) {
+ ring->queue = 0;
+ ring->int_enable = bcmgenet_tx_ring16_int_enable;
+ ring->int_disable = bcmgenet_tx_ring16_int_disable;
+ } else {
+ ring->queue = index + 1;
+ ring->int_enable = bcmgenet_tx_ring_int_enable;
+ ring->int_disable = bcmgenet_tx_ring_int_disable;
+ }
+ ring->cbs = priv->tx_cbs + write_ptr;
+ ring->size = size;
+ ring->c_index = 0;
+ ring->free_bds = size;
+ ring->write_ptr = write_ptr;
+ ring->cb_ptr = write_ptr;
+ ring->end_ptr = end_ptr - 1;
+ ring->prod_index = 0;
+
+ /* Set flow period for ring != 16 */
+ if (index != DESC_INDEX)
+ flow_period_val = ENET_MAX_MTU_SIZE << 16;
+
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
+ /* Disable rate control for now */
+ bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
+ TDMA_FLOW_PERIOD);
+ /* Unclassified traffic goes to ring 16 */
+ bcmgenet_tdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+
+ first_bd = write_ptr;
+
+ /* Set start and end address, read and write pointers */
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ DMA_START_ADDR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ TDMA_READ_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd,
+ TDMA_WRITE_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+ DMA_END_ADDR);
+}
+
+/* Initialize a RDMA ring */
+static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size)
+{
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ int ret;
+
+ priv->num_rx_bds = TOTAL_DESC;
+ priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->rx_cbs)
+ return -ENOMEM;
+
+ ret = bcmgenet_alloc_rx_buffers(priv);
+ if (ret) {
+ kfree(priv->rx_cbs);
+ return ret;
+ }
+
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+ bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ words_per_bd * size - 1, DMA_END_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
+ DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+ return ret;
+}
+
+/* init multi xmit queues, only available for GENET2+
+ * the queues are partitioned as follows:
+ *
+ * queues 0 - 3 are priority based; each one has 32 descriptors,
+ * with queue 0 being the highest priority queue.
+ *
+ * queue 16 is the default tx queue, with GENET_DEFAULT_BD_CNT
+ * descriptors: 256 - (number of tx queues * BDs per queue) = 128
+ * descriptors.
+ *
+ * The transmit control block pool is then partitioned as follows:
+ * - tx_cbs[0...127] are for queue 16
+ * - tx_ring_cbs[0] points to tx_cbs[128..159]
+ * - tx_ring_cbs[1] points to tx_cbs[160..191]
+ * - tx_ring_cbs[2] points to tx_cbs[192..223]
+ * - tx_ring_cbs[3] points to tx_cbs[224..255]
+ */
+static void bcmgenet_init_multiq(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned int i, dma_enable;
+ u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;
+
+ if (!netif_is_multiqueue(dev)) {
+ netdev_warn(dev, "called with non multi queue aware HW\n");
+ return;
+ }
+
+ dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ dma_enable = dma_ctrl & DMA_EN;
+ dma_ctrl &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+ /* Enable strict priority arbiter mode */
+ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++) {
+ /* first 64 tx_cbs are reserved for default tx queue
+ * (ring 16)
+ */
+ bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
+ i * priv->hw_params->bds_cnt,
+ (i + 1) * priv->hw_params->bds_cnt);
+
+		/* Configure ring as descriptor ring and set up priority */
+ ring_cfg |= 1 << i;
+ dma_priority |= ((GENET_Q0_PRIORITY + i) <<
+ (GENET_MAX_MQ_CNT + 1) * i);
+ dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
+ }
+
+ /* Enable rings */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
+ reg |= ring_cfg;
+ bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+
+ /* Use configured rings priority and set ring #16 priority */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
+ reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
+ reg |= dma_priority;
+ bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);
+
+ /* Configure ring as descriptor ring and re-enable DMA if enabled */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ if (dma_enable)
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ /* disable DMA */
+ bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
+ bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+
+ for (i = 0; i < priv->num_tx_bds; i++) {
+ if (priv->tx_cbs[i].skb != NULL) {
+ dev_kfree_skb(priv->tx_cbs[i].skb);
+ priv->tx_cbs[i].skb = NULL;
+ }
+ }
+
+ bcmgenet_free_rx_buffers(priv);
+ kfree(priv->rx_cbs);
+ kfree(priv->tx_cbs);
+}
+
+/* init_edma: Initialize the TX/RX DMA rings and control registers */
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+
+ /* by default, enable ring 16 (descriptor based) */
+ ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
+ if (ret) {
+ netdev_err(priv->dev, "failed to initialize RX ring\n");
+ return ret;
+ }
+
+ /* init rDma */
+ bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+ /* Init tDma */
+ bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+	/* Initialize common TX ring structures */
+ priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
+ priv->num_tx_bds = TOTAL_DESC;
+ priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->tx_cbs) {
+ bcmgenet_fini_dma(priv);
+ return -ENOMEM;
+ }
+
+ /* initialize multi xmit queue */
+ bcmgenet_init_multiq(priv->dev);
+
+ /* initialize special ring 16 */
+ bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
+ priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
+ TOTAL_DESC);
+
+ return 0;
+}
+
+/* NAPI polling method*/
+static int bcmgenet_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_priv *priv = container_of(napi,
+ struct bcmgenet_priv, napi);
+ unsigned int work_done;
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+
+ work_done = bcmgenet_desc_rx(priv, budget);
+
+ /* Advancing our consumer index*/
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= DMA_C_INDEX_MASK;
+ bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+ priv->rx_c_index, RDMA_CONS_INDEX);
+ if (work_done < budget) {
+ napi_complete(napi);
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+ }
+
+ return work_done;
+}
+
+/* Interrupt bottom half */
+static void bcmgenet_irq_task(struct work_struct *work)
+{
+ struct bcmgenet_priv *priv = container_of(
+ work, struct bcmgenet_priv, bcmgenet_irq_work);
+
+ netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+
+ /* Link UP/DOWN event */
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+ if (priv->phydev)
+ phy_mac_interrupt(priv->phydev,
+ (priv->irq0_stat & UMAC_IRQ_LINK_UP));
+ priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+ }
+}
+
+/* bcmgenet_isr1: interrupt handler for ring buffer. */
+static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+ unsigned int index;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq1_stat =
+ bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ ~priv->int1_mask;
+	/* clear interrupts */
+ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+	/* Check the MBDONE interrupts.
+	 * A packet has been transmitted; reclaim its descriptors.
+	 */
+ if (priv->irq1_stat & 0x0000ffff) {
+ index = 0;
+ for (index = 0; index < 16; index++) {
+ if (priv->irq1_stat & (1 << index))
+ bcmgenet_tx_reclaim(priv->dev,
+ &priv->tx_rings[index]);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* bcmgenet_isr0: Handle various interrupts. */
+static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq0_stat =
+ bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+	/* clear interrupts */
+ bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "IRQ=0x%x\n", priv->irq0_stat);
+
+ if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
+		/* We use NAPI (software interrupt throttling) if
+		 * Rx descriptor throttling is not used.
+		 * Disable the interrupt here; it is re-enabled in the
+		 * poll method.
+		 */
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ if (priv->irq0_stat &
+ (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+ /* Tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ }
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_UP |
+ UMAC_IRQ_LINK_DOWN |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM |
+ UMAC_IRQ_MPD_R)) {
+		/* all remaining interrupts are handled in the bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ wake_up(&priv->wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ reg |= BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+
+ reg &= ~BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+}
+
+static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr)
+{
+ bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
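+/* For illustration only (a sketch, not part of the hardware programming
+ * sequence): assuming a hypothetical MAC address of 00:11:22:33:44:55,
+ * the two writes above would yield UMAC_MAC0 = 0x00112233 and
+ * UMAC_MAC1 = 0x00004455, i.e. the first four octets packed into MAC0
+ * and the last two into the low half of MAC1.
+ */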
+
+static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ clk_disable(priv->clk_wol);
+ /* init umac registers to synchronize s/w with h/w */
+ ret = init_umac(priv);
+ if (ret)
+ return ret;
+
+ if (priv->phydev)
+ phy_init_hw(priv->phydev);
+ /* Speed settings must be restored */
+ bcmgenet_mii_config(priv->dev);
+
+ return 0;
+}
+
+/* Returns a reusable dma control register value */
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+ u32 dma_ctrl;
+
+ /* disable DMA */
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ return dma_ctrl;
+}
+
+static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
+{
+ u32 reg;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static int bcmgenet_open(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ u32 reg;
+ int ret;
+
+ netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
+
+ /* Turn on the clock */
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
+
+ ret = init_umac(priv);
+ if (ret)
+ goto err_clk_disable;
+
+ /* disable ethernet MAC while updating its registers */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~(CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+ if (priv->wol_enabled) {
+ ret = bcmgenet_wol_resume(priv);
+		if (ret)
+			goto err_clk_disable;
+ }
+
+ if (phy_is_internal(priv->phydev)) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_ENERGY_DET_MASK;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+
+ /* Disable RX/TX DMA and flush TX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv);
+
+ /* Reinitialize TDMA and RDMA and SW housekeeping */
+ ret = bcmgenet_init_dma(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize DMA\n");
+ goto err_fini_dma;
+ }
+
+ /* Always enable ring 16 - descriptor ring */
+ bcmgenet_enable_dma(priv, dma_ctrl);
+
+ ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
+ goto err_fini_dma;
+ }
+
+ ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
+ goto err_irq0;
+ }
+
+ /* Start the network engine */
+ napi_enable(&priv->napi);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg |= (CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ /* Make sure we reflect the value of CRC_CMD_FWD */
+ priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+ device_set_wakeup_capable(&dev->dev, 1);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ netif_tx_start_all_queues(dev);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ return 0;
+
+err_irq0:
+	free_irq(priv->irq0, priv);
+err_fini_dma:
+ bcmgenet_fini_dma(priv);
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+ int ret = 0;
+ int timeout = 0;
+ u32 reg;
+
+	/* Disable TDMA so no more frames are added to the TX DMA */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check TDMA status register to confirm TDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+	if (timeout > DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait 10ms for packet drain in both tx and rx dma */
+ usleep_range(10000, 20000);
+
+ /* Disable RDMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ timeout = 0;
+ /* Check RDMA status register to confirm RDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+	if (timeout > DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int bcmgenet_close(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+ u32 reg;
+
+ netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+
+ /* Disable MAC receive */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ netif_tx_stop_all_queues(dev);
+
+ ret = bcmgenet_dma_teardown(priv);
+ if (ret)
+ return ret;
+
+	/* Disable MAC transmit; TX DMA must be disabled before this */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ napi_disable(&priv->napi);
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_dma(priv);
+
+ free_irq(priv->irq0, priv);
+ free_irq(priv->irq1, priv);
+
+ /* Wait for pending work items to complete - we are stopping
+ * the clock now. Since interrupts are disabled, no new work
+ * will be scheduled.
+ */
+ cancel_work_sync(&priv->bcmgenet_irq_work);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+
+ if (priv->wol_enabled)
+ clk_enable(priv->clk_wol);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static void bcmgenet_timeout(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+
+ dev->trans_start = jiffies;
+
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+#define MAX_MC_COUNT 16
+
+static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr,
+ int *i,
+ int *mc)
+{
+ u32 reg;
+
+ bcmgenet_umac_writel(priv,
+ addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
+ bcmgenet_umac_writel(priv,
+ addr[2] << 24 | addr[3] << 16 |
+ addr[4] << 8 | addr[5],
+ UMAC_MDF_ADDR + ((*i + 1) * 4));
+ reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+ reg |= (1 << (MAX_MC_COUNT - *mc));
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+ *i += 2;
+ (*mc)++;
+}
+
+static void bcmgenet_set_rx_mode(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i, mc;
+ u32 reg;
+
+ netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+	/* Promiscuous mode */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC) {
+ reg |= CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+ return;
+ } else {
+ reg &= ~CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ /* UniMac doesn't support ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI) {
+ netdev_warn(dev, "ALLMULTI is not supported\n");
+ return;
+ }
+
+ /* update MDF filter */
+ i = 0;
+ mc = 0;
+ /* Broadcast */
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ /* my own address.*/
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+ /* Unicast list*/
+ if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+ return;
+
+ if (!netdev_uc_empty(dev))
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ /* Multicast */
+ if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+ return;
+
+ netdev_for_each_mc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+}
+
+/* Set the hardware MAC address. */
+static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ /* Setting the MAC address at the hardware level is not possible
+ * without disabling the UniMAC RX/TX enable bits.
+ */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static u16 bcmgenet_select_queue(struct net_device *dev,
+ struct sk_buff *skb, void *accel_priv)
+{
+ return netif_is_multiqueue(dev) ? skb->queue_mapping : 0;
+}
+
+static const struct net_device_ops bcmgenet_netdev_ops = {
+ .ndo_open = bcmgenet_open,
+ .ndo_stop = bcmgenet_close,
+ .ndo_start_xmit = bcmgenet_xmit,
+ .ndo_select_queue = bcmgenet_select_queue,
+ .ndo_tx_timeout = bcmgenet_timeout,
+ .ndo_set_rx_mode = bcmgenet_set_rx_mode,
+ .ndo_set_mac_address = bcmgenet_set_mac_addr,
+ .ndo_do_ioctl = bcmgenet_ioctl,
+ .ndo_set_features = bcmgenet_set_features,
+};
+
+/* Array of GENET hardware parameters/characteristics */
+static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
+ [GENET_V1] = {
+ .tx_queues = 0,
+ .rx_queues = 0,
+ .bds_cnt = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+ },
+ [GENET_V2] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT,
+ },
+ [GENET_V3] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+ [GENET_V4] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+};
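+/* Note on the table above: for GENET_V2 and later, tx_queues (4) times
+ * bds_cnt (32) accounts for 128 of the TOTAL_DESC (256) descriptors,
+ * leaving 128 descriptors for the default descriptor-based ring 16, as
+ * described in the comment above bcmgenet_init_multiq().
+ */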
+
+/* Infer hardware parameters from the detected GENET version */
+static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+{
+ struct bcmgenet_hw_params *params;
+ u32 reg;
+ u8 major;
+
+ if (GENET_IS_V4(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V4;
+ } else if (GENET_IS_V3(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V3;
+ } else if (GENET_IS_V2(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V2;
+ } else if (GENET_IS_V1(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V1;
+ }
+
+ /* enum genet_version starts at 1 */
+ priv->hw_params = &bcmgenet_hw_params[priv->version];
+ params = priv->hw_params;
+
+ /* Read GENET HW version */
+ reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
+ major = (reg >> 24 & 0x0f);
+ if (major == 5)
+ major = 4;
+ else if (major == 0)
+ major = 1;
+ if (major != priv->version) {
+ dev_err(&priv->pdev->dev,
+ "GENET version mismatch, got: %d, configured for: %d\n",
+ major, priv->version);
+ }
+
+ /* Print the GENET core version */
+ dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
+ major, (reg >> 16) & 0x0f, reg & 0xffff);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (!(params->flags & GENET_HAS_40BITS))
+		pr_warn("GENET does not support 40-bit PA\n");
+#endif
+
+ pr_debug("Configuration for version: %d\n"
+ "TXq: %1d, RXq: %1d, BDs: %1d\n"
+ "BP << en: %2d, BP msk: 0x%05x\n"
+		 "HFB count: %2d, QTAG msk: 0x%05x\n"
+ "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
+ "RDMA: 0x%05x, TDMA: 0x%05x\n"
+ "Words/BD: %d\n",
+ priv->version,
+ params->tx_queues, params->rx_queues, params->bds_cnt,
+ params->bp_in_en_shift, params->bp_in_mask,
+ params->hfb_filter_cnt, params->qtag_mask,
+ params->tbuf_offset, params->hfb_offset,
+ params->hfb_reg_offset,
+ params->rdma_offset, params->tdma_offset,
+ params->words_per_bd);
+}
+
+static const struct of_device_id bcmgenet_match[] = {
+ { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
+ { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
+ { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
+ { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { },
+};
+
+static int bcmgenet_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ struct bcmgenet_priv *priv;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ int err = -EIO;
+
+ /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
+ dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+ if (!dev) {
+ dev_err(&pdev->dev, "can't allocate net device\n");
+ return -ENOMEM;
+ }
+
+ of_id = of_match_node(bcmgenet_match, dn);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = netdev_priv(dev);
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ dev_err(&pdev->dev, "can't find IRQs\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr) {
+ dev_err(&pdev->dev, "can't find MAC address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!priv->base) {
+ dev_err(&pdev->dev, "can't ioremap\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ ether_addr_copy(dev->dev_addr, macaddr);
+ dev->watchdog_timeo = 2 * HZ;
+ SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+ dev->netdev_ops = &bcmgenet_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
+
+ priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
+
+ /* Set hardware features */
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+
+	/* Set the needed headroom to account for any possible
+	 * features being enabled or disabled at runtime
+	 */
+ dev->needed_headroom += 64;
+
+ netdev_boot_setup_check(dev);
+
+ priv->dev = dev;
+ priv->pdev = pdev;
+ priv->version = (enum bcmgenet_version)of_id->data;
+
+ bcmgenet_set_hw_params(priv);
+
+ spin_lock_init(&priv->lock);
+ /* Mii wait queue */
+ init_waitqueue_head(&priv->wq);
+ /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
+ priv->rx_buf_len = RX_BUF_LENGTH;
+ INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
+
+ priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ if (IS_ERR(priv->clk))
+ dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+
+ priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+ if (IS_ERR(priv->clk_wol))
+ dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ err = reset_umac(priv);
+ if (err)
+ goto err_clk_disable;
+
+ err = bcmgenet_mii_init(dev);
+ if (err)
+ goto err_clk_disable;
+
+	/* set up the number of real queues + 1 (GENET_V1 has 0 hardware
+	 * queues, just the ring 16 descriptor-based TX queue)
+	 */
+ netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
+ netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_clk_disable;
+
+ /* Turn off the main clock, WOL clock is handled separately */
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return err;
+
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+err:
+ free_netdev(dev);
+ return err;
+}
+
+static int bcmgenet_remove(struct platform_device *pdev)
+{
+ struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ unregister_netdev(priv->dev);
+ bcmgenet_mii_exit(priv->dev);
+ free_netdev(priv->dev);
+
+ return 0;
+}
+
+
+static struct platform_driver bcmgenet_driver = {
+ .probe = bcmgenet_probe,
+ .remove = bcmgenet_remove,
+ .driver = {
+ .name = "bcmgenet",
+ .owner = THIS_MODULE,
+ .of_match_table = bcmgenet_match,
+ },
+};
+module_platform_driver(bcmgenet_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
+MODULE_ALIAS("platform:bcmgenet");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644
index 000000000000..8e48db8a1789
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -0,0 +1,630 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *
+*/
+#ifndef __BCMGENET_H__
+#define __BCMGENET_H__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+/* total number of Buffer Descriptors, same for Rx/Tx */
+#define TOTAL_DESC 256
+
+/* which ring is descriptor based */
+#define DESC_INDEX 16
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528;
+ * adding ENET_PAD(8) gives 1536, which is a multiple of 256 bytes.
+ */
+#define ENET_BRCM_TAG_LEN 6
+#define ENET_PAD 8
+#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+#define DMA_MAX_BURST_LENGTH 0x10
+
+/* misc. configuration */
+#define CLEAR_ALL_HFB 0xFF
+#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
+#define DMA_FC_THRESH_LO 5
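+/* Worked values, for reference only: with TOTAL_DESC = 256 above,
+ * DMA_FC_THRESH_HI evaluates to 256 >> 4 = 16 descriptors, while
+ * DMA_FC_THRESH_LO stays at 5. The macro definitions remain authoritative.
+ */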
+
+/* 64B receive/transmit status block */
+struct status_64 {
+ u32 length_status; /* length and peripheral status */
+ u32 ext_status; /* Extended status*/
+ u32 rx_csum; /* partial rx checksum */
+ u32 unused1[9]; /* unused */
+ u32 tx_csum_info; /* Tx checksum info. */
+ u32 unused2[3]; /* unused */
+};
+
+/* Rx status bits */
+#define STATUS_RX_EXT_MASK 0x1FFFFF
+#define STATUS_RX_CSUM_MASK 0xFFFF
+#define STATUS_RX_CSUM_OK 0x10000
+#define STATUS_RX_CSUM_FR 0x20000
+#define STATUS_RX_PROTO_TCP 0
+#define STATUS_RX_PROTO_UDP 1
+#define STATUS_RX_PROTO_ICMP 2
+#define STATUS_RX_PROTO_OTHER 3
+#define STATUS_RX_PROTO_MASK 3
+#define STATUS_RX_PROTO_SHIFT 18
+#define STATUS_FILTER_INDEX_MASK 0xFFFF
+/* Tx status bits */
+#define STATUS_TX_CSUM_START_MASK 0X7FFF
+#define STATUS_TX_CSUM_START_SHIFT 16
+#define STATUS_TX_CSUM_PROTO_UDP 0x8000
+#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF
+#define STATUS_TX_CSUM_LV 0x80000000
+
+/* DMA Descriptor */
+#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */
+#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */
+#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */
+
+/* Rx/Tx common counter group */
+struct bcmgenet_pkt_counters {
+	u32	cnt_64;		/* RO Received/Transmitted 64 bytes packet */
+	u32	cnt_127;	/* RO Rx/Tx 65-127 bytes packet */
+	u32	cnt_255;	/* RO Rx/Tx 128-255 bytes packet */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcmgenet_rx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count*/
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Receive broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt*/
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt*/
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt*/
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470),# of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcmgenet_tx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+	u32	pkts;		/* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited Excessive deferral pkt*/
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt*/
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt*/
+ u32 frg; /* RO # of xmited fragments pkt*/
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count*/
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+	u32	uc;		/* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcmgenet_mib_counters {
+ struct bcmgenet_rx_counters rx;
+ struct bcmgenet_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+ u32 mdf_err_cnt;
+};
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define UMAC_SPEED_10 0
+#define UMAC_SPEED_100 1
+#define UMAC_SPEED_1000 2
+#define UMAC_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00C
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+#define UMAC_MDIO_CMD 0x614
+#define MDIO_START_BUSY (1 << 29)
+#define MDIO_READ_FAIL (1 << 28)
+#define MDIO_RD (2 << 26)
+#define MDIO_WR (1 << 26)
+#define MDIO_PMD_SHIFT 21
+#define MDIO_PMD_MASK 0x1F
+#define MDIO_REG_SHIFT 16
+#define MDIO_REG_MASK 0x1F
+
+#define UMAC_RBUF_OVFL_CNT 0x61C
+
+#define UMAC_MPD_CTRL 0x620
+#define MPD_EN (1 << 0)
+#define MPD_PW_EN (1 << 27)
+#define MPD_MSEQ_LEN_SHIFT 16
+#define MPD_MSEQ_LEN_MASK 0xFF
+
+#define UMAC_MPD_PW_MS 0x624
+#define UMAC_MPD_PW_LS 0x628
+#define UMAC_RBUF_ERR_CNT 0x634
+#define UMAC_MDF_ERR_CNT 0x638
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RESET_RX (1 << 0)
+#define MIB_RESET_RUNT (1 << 1)
+#define MIB_RESET_TX (1 << 2)
+
+#define RBUF_CTRL 0x00
+#define RBUF_64B_EN (1 << 0)
+#define RBUF_ALIGN_2B (1 << 1)
+#define RBUF_BAD_DIS (1 << 2)
+
+#define RBUF_STATUS 0x0C
+#define RBUF_STATUS_WOL (1 << 0)
+#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1)
+#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2)
+
+#define RBUF_CHK_CTRL 0x14
+#define RBUF_RXCHK_EN (1 << 0)
+#define RBUF_SKIP_FCS (1 << 4)
+
+#define RBUF_TBUF_SIZE_CTRL 0xb4
+
+#define RBUF_HFB_CTRL_V1 0x38
+#define RBUF_HFB_FILTER_EN_SHIFT 16
+#define RBUF_HFB_FILTER_EN_MASK 0xffff0000
+#define RBUF_HFB_EN (1 << 0)
+#define RBUF_HFB_256B (1 << 1)
+#define RBUF_ACPI_EN (1 << 2)
+
+#define RBUF_HFB_LEN_V1 0x3C
+#define RBUF_FLTR_LEN_MASK 0xFF
+#define RBUF_FLTR_LEN_SHIFT 8
+
+#define TBUF_CTRL 0x00
+#define TBUF_BP_MC 0x0C
+
+#define TBUF_CTRL_V1 0x80
+#define TBUF_BP_MC_V1 0xA0
+
+#define HFB_CTRL 0x00
+#define HFB_FLT_ENABLE_V3PLUS 0x04
+#define HFB_FLT_LEN_V2 0x04
+#define HFB_FLT_LEN_V3PLUS 0x1C
+
+/* uniMac intrl2 registers */
+#define INTRL2_CPU_STAT 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0C
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* INTRL2 instance 0 definitions */
+#define UMAC_IRQ_SCB (1 << 0)
+#define UMAC_IRQ_EPHY (1 << 1)
+#define UMAC_IRQ_PHY_DET_R (1 << 2)
+#define UMAC_IRQ_PHY_DET_F (1 << 3)
+#define UMAC_IRQ_LINK_UP (1 << 4)
+#define UMAC_IRQ_LINK_DOWN (1 << 5)
+#define UMAC_IRQ_UMAC (1 << 6)
+#define UMAC_IRQ_UMAC_TSV (1 << 7)
+#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
+#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9)
+#define UMAC_IRQ_HFB_SM (1 << 10)
+#define UMAC_IRQ_HFB_MM (1 << 11)
+#define UMAC_IRQ_MPD_R (1 << 12)
+#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
+#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
+#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
+#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
+#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
+#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
+/* Only valid for GENETv3+ */
+#define UMAC_IRQ_MDIO_DONE (1 << 23)
+#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+
+/* Register block offsets */
+#define GENET_SYS_OFF 0x0000
+#define GENET_GR_BRIDGE_OFF 0x0040
+#define GENET_EXT_OFF 0x0080
+#define GENET_INTRL2_0_OFF 0x0200
+#define GENET_INTRL2_1_OFF 0x0240
+#define GENET_RBUF_OFF 0x0300
+#define GENET_UMAC_OFF 0x0800
+
+/* SYS block offsets and register definitions */
+#define SYS_REV_CTRL 0x00
+#define SYS_PORT_CTRL 0x04
+#define PORT_MODE_INT_EPHY 0
+#define PORT_MODE_INT_GPHY 1
+#define PORT_MODE_EXT_EPHY 2
+#define PORT_MODE_EXT_GPHY 3
+#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4))
+#define PORT_MODE_EXT_RVMII_50 4
+#define LED_ACT_SOURCE_MAC (1 << 9)
+
+#define SYS_RBUF_FLUSH_CTRL 0x08
+#define SYS_TBUF_FLUSH_CTRL 0x0C
+#define RBUF_FLUSH_CTRL_V1 0x04
+
+/* Ext block register offsets and definitions */
+#define EXT_EXT_PWR_MGMT 0x00
+#define EXT_PWR_DOWN_BIAS (1 << 0)
+#define EXT_PWR_DOWN_DLL (1 << 1)
+#define EXT_PWR_DOWN_PHY (1 << 2)
+#define EXT_PWR_DN_EN_LD (1 << 3)
+#define EXT_ENERGY_DET (1 << 4)
+#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_PHY_RESET (1 << 8)
+#define EXT_ENERGY_DET_MASK (1 << 12)
+
+#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN (1 << 0)
+#define RGMII_LINK (1 << 4)
+#define OOB_DISABLE (1 << 5)
+#define ID_MODE_DIS (1 << 16)
+
+#define EXT_GPHY_CTRL 0x1C
+#define EXT_CFG_IDDQ_BIAS (1 << 0)
+#define EXT_CFG_PWR_DOWN (1 << 1)
+#define EXT_GPHY_RESET (1 << 5)
+
+/* DMA rings size */
+#define DMA_RING_SIZE (0x40)
+#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1))
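+/* For reference: with DESC_INDEX = 16, DMA_RINGS_SIZE evaluates to
+ * 0x40 * 17 = 0x440 bytes of ring register space (17 rings, 0x40 each).
+ */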
+
+/* DMA registers common definitions */
+#define DMA_RW_POINTER_MASK 0x1FF
+#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF
+#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16
+#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF
+#define DMA_BUFFER_DONE_CNT_SHIFT 16
+#define DMA_P_INDEX_MASK 0xFFFF
+#define DMA_C_INDEX_MASK 0xFFFF
+
+/* DMA ring size register */
+#define DMA_RING_SIZE_MASK 0xFFFF
+#define DMA_RING_SIZE_SHIFT 16
+#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
+
+/* DMA interrupt threshold register */
+#define DMA_INTR_THRESHOLD_MASK 0x00FF
+
+/* DMA XON/XOFF register */
+#define DMA_XON_THREHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_SHIFT 16
+
+/* DMA flow period register */
+#define DMA_FLOW_PERIOD_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_SHIFT 16
+
+
+/* DMA control register */
+#define DMA_EN (1 << 0)
+#define DMA_RING_BUF_EN_SHIFT 0x01
+#define DMA_RING_BUF_EN_MASK 0xFFFF
+#define DMA_TSB_SWAP_EN (1 << 20)
+
+/* DMA status register */
+#define DMA_DISABLED (1 << 0)
+#define DMA_DESC_RAM_INIT_BUSY (1 << 1)
+
+/* DMA SCB burst size register */
+#define DMA_SCB_BURST_SIZE_MASK 0x1F
+
+/* DMA activity vector register */
+#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF
+
+/* DMA backpressure mask register */
+#define DMA_BACKPRESSURE_MASK 0x1FFFF
+#define DMA_PFC_ENABLE (1 << 31)
+
+/* DMA backpressure status register */
+#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF
+
+/* DMA override register */
+#define DMA_LITTLE_ENDIAN_MODE (1 << 0)
+#define DMA_REGISTER_MODE (1 << 1)
+
+/* DMA timeout register */
+#define DMA_TIMEOUT_MASK 0xFFFF
+#define DMA_TIMEOUT_VAL 5000 /* micro seconds */
+
+/* TDMA rate limiting control register */
+#define DMA_RATE_LIMIT_EN_MASK 0xFFFF
+
+/* TDMA arbitration control register */
+#define DMA_ARBITER_MODE_MASK 0x03
+#define DMA_RING_BUF_PRIORITY_MASK 0x1F
+#define DMA_RING_BUF_PRIORITY_SHIFT 5
+#define DMA_RATE_ADJ_MASK 0xFF
+
+/* Tx/Rx Dma Descriptor common bits*/
+#define DMA_BUFLENGTH_MASK 0x0fff
+#define DMA_BUFLENGTH_SHIFT 16
+#define DMA_OWN 0x8000
+#define DMA_EOP 0x4000
+#define DMA_SOP 0x2000
+#define DMA_WRAP 0x1000
+/* Tx specific Dma descriptor bits */
+#define DMA_TX_UNDERRUN 0x0200
+#define DMA_TX_APPEND_CRC 0x0040
+#define DMA_TX_OW_CRC 0x0020
+#define DMA_TX_DO_CSUM 0x0010
+#define DMA_TX_QTAG_SHIFT 7
+
+/* Rx Specific Dma descriptor bits */
+#define DMA_RX_CHK_V3PLUS 0x8000
+#define DMA_RX_CHK_V12 0x1000
+#define DMA_RX_BRDCAST 0x0040
+#define DMA_RX_MULT 0x0020
+#define DMA_RX_LG 0x0010
+#define DMA_RX_NO 0x0008
+#define DMA_RX_RXER 0x0004
+#define DMA_RX_CRC_ERROR 0x0002
+#define DMA_RX_OV 0x0001
+#define DMA_RX_FI_MASK 0x001F
+#define DMA_RX_FI_SHIFT 0x0007
+#define DMA_DESC_ALLOC_MASK 0x00FF
+
+#define DMA_ARBITER_RR 0x00
+#define DMA_ARBITER_WRR 0x01
+#define DMA_ARBITER_SP 0x02
+
+struct enet_cb {
+ struct sk_buff *skb;
+ void __iomem *bd_addr;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* power management mode */
+enum bcmgenet_power_mode {
+ GENET_POWER_CABLE_SENSE = 0,
+ GENET_POWER_PASSIVE,
+};
+
+struct bcmgenet_priv;
+
+/* We support both runtime GENET detection and compile-time selection,
+ * to optimize the code paths for a given hardware version
+ */
+enum bcmgenet_version {
+ GENET_V1 = 1,
+ GENET_V2,
+ GENET_V3,
+ GENET_V4
+};
+
+#define GENET_IS_V1(p) ((p)->version == GENET_V1)
+#define GENET_IS_V2(p) ((p)->version == GENET_V2)
+#define GENET_IS_V3(p) ((p)->version == GENET_V3)
+#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+
+/* Hardware flags */
+#define GENET_HAS_40BITS (1 << 0)
+#define GENET_HAS_EXT (1 << 1)
+#define GENET_HAS_MDIO_INTR (1 << 2)
+
+/* BCMGENET hardware parameters, keep this structure nicely aligned
+ * since it is going to be used in hot paths
+ */
+struct bcmgenet_hw_params {
+ u8 tx_queues;
+ u8 rx_queues;
+ u8 bds_cnt;
+ u8 bp_in_en_shift;
+ u32 bp_in_mask;
+ u8 hfb_filter_cnt;
+ u8 qtag_mask;
+ u16 tbuf_offset;
+ u32 hfb_offset;
+ u32 hfb_reg_offset;
+ u32 rdma_offset;
+ u32 tdma_offset;
+ u32 words_per_bd;
+ u32 flags;
+};
+
+struct bcmgenet_tx_ring {
+ spinlock_t lock; /* ring lock */
+ unsigned int index; /* ring index */
+ unsigned int queue; /* queue index */
+ struct enet_cb *cbs; /* tx ring buffer control block*/
+ unsigned int size; /* size of each tx ring */
+ unsigned int c_index; /* last consumer index of each ring*/
+ unsigned int free_bds; /* # of free bds for each ring */
+ unsigned int write_ptr; /* Tx ring write pointer SW copy */
+ unsigned int prod_index; /* Tx ring producer index SW copy */
+ unsigned int cb_ptr; /* Tx ring initial CB ptr */
+ unsigned int end_ptr; /* Tx ring end CB ptr */
+ void (*int_enable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+ void (*int_disable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+};
+
+/* device context */
+struct bcmgenet_priv {
+ void __iomem *base;
+ enum bcmgenet_version version;
+ struct net_device *dev;
+ spinlock_t lock;
+ spinlock_t bh_lock;
+ u32 int0_mask;
+ u32 int1_mask;
+
+ /* NAPI for descriptor based rx */
+ struct napi_struct napi ____cacheline_aligned;
+
+ /* transmit variables */
+ void __iomem *tx_bds;
+ struct enet_cb *tx_cbs;
+ unsigned int num_tx_bds;
+
+ struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+
+ /* receive variables */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ int rx_bd_assign_index;
+ struct enet_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_buf_len;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* other misc variables */
+ struct bcmgenet_hw_params *hw_params;
+
+ /* MDIO bus variables */
+ wait_queue_head_t wq;
+ struct phy_device *phydev;
+ struct device_node *phy_dn;
+ struct mii_bus *mii_bus;
+
+ /* PHY device variables */
+ int old_duplex;
+ int old_link;
+ int old_pause;
+ phy_interface_t phy_interface;
+ int phy_addr;
+ int ext_phy;
+
+ /* Interrupt variables */
+ struct work_struct bcmgenet_irq_work;
+ int irq0;
+ int irq1;
+ unsigned int irq0_stat;
+ unsigned int irq1_stat;
+
+ /* HW descriptors/checksum variables */
+ bool desc_64b_en;
+ bool desc_rxchk_en;
+ bool crc_fwd_en;
+
+ unsigned int dma_rx_chk_bit;
+
+ u32 msg_enable;
+
+ struct clk *clk;
+ struct platform_device *pdev;
+
+ /* WOL */
+ unsigned long wol_enabled;
+ struct clk *clk_wol;
+ u32 wolopts;
+
+ struct bcmgenet_mib_counters mib;
+};
+
+#define GENET_IO_MACRO(name, offset) \
+static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
+ u32 off) \
+{ \
+ return __raw_readl(priv->base + offset + off); \
+} \
+static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+}
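+/* A sketch of how the macro above is used: GENET_IO_MACRO(umac,
+ * GENET_UMAC_OFF) generates bcmgenet_umac_readl()/bcmgenet_umac_writel(),
+ * which access a register at priv->base + GENET_UMAC_OFF + off, e.g.:
+ *
+ *	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ *	bcmgenet_umac_writel(priv, reg | CMD_TX_EN, UMAC_CMD);
+ */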
+
+GENET_IO_MACRO(ext, GENET_EXT_OFF);
+GENET_IO_MACRO(umac, GENET_UMAC_OFF);
+GENET_IO_MACRO(sys, GENET_SYS_OFF);
+
+/* interrupt l2 registers accessors */
+GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
+GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
+
+/* HFB register accessors */
+GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
+
+/* GENET v2+ HFB control and filter len helpers */
+GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
+
+/* RBUF register accessors */
+GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
+
+/* MDIO routines */
+int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev);
+void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
+
+#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644
index 000000000000..4608673beaff
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -0,0 +1,464 @@
+/*
+ * Broadcom GENET MDIO routines
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/brcmphy.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+#include "bcmgenet.h"
+
+/* read a value from the MII */
+static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ int ret;
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+ /* Start MDIO transaction*/
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+ & MDIO_START_BUSY),
+ HZ / 100);
+ ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+
+ if (ret & MDIO_READ_FAIL)
+ return -EIO;
+
+ return ret & 0xffff;
+}
+
+/* write a value to the MII */
+static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
+ int location, u16 val)
+{
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT) | (0xffff & val)),
+ UMAC_MDIO_CMD);
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+ MDIO_START_BUSY),
+ HZ / 100);
+
+ return 0;
+}
+
+/* set up the netdev link state when the PHY link status changes, and
+ * update the UMAC and RGMII blocks when the link is up
+ */
+static void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ u32 reg, cmd_bits = 0;
+ unsigned int status_changed = 0;
+
+ if (priv->old_link != phydev->link) {
+ status_changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (phydev->link) {
+		/* program the UMAC and RGMII blocks based on the established
+		 * link speed, pause, and duplex.
+		 * The speed set in umac->cmd tells the RGMII block which
+		 * clock to use for transmit: 25MHz (100Mbps) or
+		 * 125MHz (1Gbps). The receive clock is provided by the PHY.
+		 */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = UMAC_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = UMAC_SPEED_100;
+ else
+ cmd_bits = UMAC_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (priv->old_duplex != phydev->duplex) {
+ status_changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ status_changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ /* pause capability */
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ if (status_changed)
+ phy_print_status(phydev);
+}
+
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (priv->phydev) {
+ phy_init_hw(priv->phydev);
+ phy_start_aneg(priv->phydev);
+ }
+}
+
+static void bcmgenet_ephy_power_up(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg = 0;
+
+ /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
+ if (!GENET_IS_V4(priv))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(2);
+
+ reg &= ~EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ udelay(20);
+}
+
+static void bcmgenet_internal_phy_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Power up EPHY */
+ bcmgenet_ephy_power_up(dev);
+ /* enable APD */
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(dev);
+}
+
+static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+}
+
+int bcmgenet_mii_config(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct device *kdev = &priv->pdev->dev;
+ const char *phy_name = NULL;
+ u32 id_mode_dis = 0;
+ u32 port_ctrl;
+ u32 reg;
+
+ priv->ext_phy = !phy_is_internal(priv->phydev) &&
+ (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
+ if (phy_is_internal(priv->phydev))
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MOCA:
+		/* Irrespective of the actually configured PHY speed (100 or
+		 * 1000), GENETv4 only has an internal GPHY, so we will just
+		 * end up masking the Gigabit features from what we support,
+		 * not switching to the EPHY
+		 */
+ if (GENET_IS_V4(priv))
+ port_ctrl = PORT_MODE_INT_GPHY;
+ else
+ port_ctrl = PORT_MODE_INT_EPHY;
+
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+ if (phy_is_internal(priv->phydev)) {
+ phy_name = "internal PHY";
+ bcmgenet_internal_phy_setup(dev);
+ } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ phy_name = "MoCA";
+ bcmgenet_moca_phy_setup(priv);
+ }
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ phy_name = "external MII";
+ phydev->supported &= PHY_BASIC_FEATURES;
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_REVMII:
+ phy_name = "external RvMII";
+ /* of_mdiobus_register took care of reading the 'max-speed'
+ * PHY property for us, effectively limiting the PHY supported
+ * capabilities, use that knowledge to also configure the
+ * Reverse MII interface correctly.
+ */
+ if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+ PHY_BASIC_FEATURES)
+ port_ctrl = PORT_MODE_EXT_RVMII_25;
+ else
+ port_ctrl = PORT_MODE_EXT_RVMII_50;
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII_NO_ID: TXC transitions at the same time as TXD
+ * (requires PCB or receiver-side delay)
+ * RGMII: Add 2ns delay on TXC (90 degree shift)
+ *
+ * ID is implicitly disabled for 100Mbps (RG)MII operation.
+ */
+ id_mode_dis = BIT(16);
+ /* fall through */
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (id_mode_dis)
+ phy_name = "external RGMII (no delay)";
+ else
+ phy_name = "external RGMII (TX delay)";
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg |= RGMII_MODE_EN | id_mode_dis;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ break;
+ default:
+ dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
+ return -EINVAL;
+ }
+
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_probe(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ unsigned int phy_flags;
+ int ret;
+
+ if (priv->phydev) {
+ pr_info("PHY already attached\n");
+ return 0;
+ }
+
+ if (priv->phy_dn)
+ phydev = of_phy_connect(dev, priv->phy_dn,
+ bcmgenet_mii_setup, 0,
+ priv->phy_interface);
+ else
+ phydev = of_phy_connect_fixed_link(dev,
+ bcmgenet_mii_setup,
+ priv->phy_interface);
+
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ priv->old_link = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+ priv->phydev = phydev;
+
+ /* Configure the port multiplexer based on what the probed PHY device is,
+ * since reading the 'max-speed' property determines the maximum supported
+ * PHY speed, which is needed for bcmgenet_mii_config() to configure
+ * things appropriately.
+ */
+ ret = bcmgenet_mii_config(dev);
+ if (ret) {
+ phy_disconnect(priv->phydev);
+ return ret;
+ }
+
+ phy_flags = PHY_BRCM_100MBPS_WAR;
+
+ /* workarounds are only needed for 100Mbps PHYs, and
+ * never on GENET V1 hardware
+ */
+ if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
+ phy_flags = 0;
+
+ phydev->dev_flags |= phy_flags;
+ phydev->advertising = phydev->supported;
+
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs
+ */
+ if (phy_is_internal(priv->phydev))
+ priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
+ else
+ priv->mii_bus->irq[phydev->addr] = PHY_POLL;
+
+ pr_info("attached PHY at address %d [%s]\n",
+ phydev->addr, phydev->drv->name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
+{
+ struct mii_bus *bus;
+
+ if (priv->mii_bus)
+ return 0;
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ pr_err("failed to allocate\n");
+ return -ENOMEM;
+ }
+
+ bus = priv->mii_bus;
+ bus->priv = priv->dev;
+ bus->name = "bcmgenet MII bus";
+ bus->parent = &priv->pdev->dev;
+ bus->read = bcmgenet_mii_read;
+ bus->write = bcmgenet_mii_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ priv->pdev->name, priv->pdev->id);
+
+ bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *mdio_dn;
+ char *compat;
+ int ret;
+
+ compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
+ if (!compat)
+ return -ENOMEM;
+
+ mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ kfree(compat);
+ if (!mdio_dn) {
+ dev_err(kdev, "unable to find MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
+ if (ret) {
+ dev_err(kdev, "failed to register MDIO bus\n");
+ return ret;
+ }
+
+ /* Fetch the PHY phandle */
+ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+
+ /* Get the link mode */
+ priv->phy_interface = of_get_phy_mode(dn);
+
+ return 0;
+}
+
+int bcmgenet_mii_init(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = bcmgenet_mii_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = bcmgenet_mii_of_init(priv);
+ if (ret)
+ goto out_free;
+
+ ret = bcmgenet_mii_probe(dev);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mdiobus_unregister(priv->mii_bus);
+out_free:
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+ return ret;
+}
+
+void bcmgenet_mii_exit(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3167ed6593b0..6e5e7c0ffbd7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11362,12 +11362,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
msix_ent[i].vector = 0;
}
- rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
if (rc < 0) {
return false;
- } else if (rc != 0) {
- if (pci_enable_msix(tp->pdev, msix_ent, rc))
- return false;
+ } else if (rc < tp->irq_cnt) {
netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
tp->irq_cnt, rc);
tp->irq_cnt = rc;
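Most of the MSI-X hunks in this series (tg3 above; bnad, cxgb3/cxgb4/cxgb4vf, enic and be2net below) follow the same conversion: the old pci_enable_msix() retry loop is collapsed into a single pci_enable_msix_range() call, which either allocates between minvec and maxvec vectors or fails with a negative errno. The sketch below only illustrates that idiom; it is not code from any of the patched drivers, and the function and variable names are made up.

#include <linux/pci.h>

/* Illustrative only: the pci_enable_msix_range() idiom used by the
 * conversions in this series.  The call returns the number of vectors
 * actually allocated (somewhere in [minvec, maxvec]) or a negative
 * errno, so no retry loop is needed.  Requesting minvec == maxvec, as
 * the enic hunks do, asks for an exact vector count.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int minvec, int maxvec)
{
	int i, nvec;

	for (i = 0; i < maxvec; i++)
		entries[i].entry = i;

	nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (nvec < 0)
		return nvec;	/* not even minvec vectors available */

	/* Size the driver's queues to nvec and use entries[i].vector
	 * when requesting the interrupts.
	 */
	return nvec;
}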
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index cf64f3d0b60d..bf436d0a1094 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2666,9 +2666,11 @@ bnad_enable_msix(struct bnad *bnad)
for (i = 0; i < bnad->msix_num; i++)
bnad->msix_table[i].entry = i;
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
- if (ret > 0) {
- /* Not enough MSI-X vectors. */
+ ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
+ 1, bnad->msix_num);
+ if (ret < 0) {
+ goto intx_mode;
+ } else if (ret < bnad->msix_num) {
pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
ret, bnad->msix_num);
@@ -2681,18 +2683,11 @@ bnad_enable_msix(struct bnad *bnad)
bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
- if (bnad->msix_num > ret)
- goto intx_mode;
-
- /* Try once more with adjusted numbers */
- /* If this fails, fall back to INTx */
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
- bnad->msix_num);
- if (ret)
+ if (bnad->msix_num > ret) {
+ pci_disable_msix(bnad->pcidev);
goto intx_mode;
-
- } else if (ret < 0)
- goto intx_mode;
+ }
+ }
pci_intx(bnad->pcidev, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 45d77334d7d9..07bbb711b7e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int vectors;
- int i, err;
+ int i;
vectors = ARRAY_SIZE(entries);
for (i = 0; i < vectors; ++i)
entries[i].entry = i;
- while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
- vectors = err;
-
- if (err < 0)
- pci_disable_msix(adap->pdev);
-
- if (!err && vectors < (adap->params.nports + 1)) {
- pci_disable_msix(adap->pdev);
- err = -1;
- }
+ vectors = pci_enable_msix_range(adap->pdev, entries,
+ adap->params.nports + 1, vectors);
+ if (vectors < 0)
+ return vectors;
- if (!err) {
- for (i = 0; i < vectors; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- adap->msix_nvectors = vectors;
- }
+ for (i = 0; i < vectors; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ adap->msix_nvectors = vectors;
- return err;
+ return 0;
}
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1f4b9b30b9ed..944f2cbc1795 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -66,6 +66,7 @@ enum {
SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
};
enum {
@@ -254,6 +255,7 @@ struct vpd_params {
u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
+ u8 pn[PN_LEN + 1];
};
struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
unsigned char bypass;
unsigned int ofldq_wr_cred;
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
#include "t4fw_api.h"
@@ -957,7 +960,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
-
+const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 43ab35fea48d..4660f55e292b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -254,6 +254,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5011, 4),
CH_DEVICE(0x5012, 4),
CH_DEVICE(0x5013, 4),
+ CH_DEVICE(0x5014, 4),
+ CH_DEVICE(0x5015, 4),
CH_DEVICE(0x5401, 4),
CH_DEVICE(0x5402, 4),
CH_DEVICE(0x5403, 4),
@@ -273,6 +275,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5411, 4),
CH_DEVICE(0x5412, 4),
CH_DEVICE(0x5413, 4),
+ CH_DEVICE(0x5414, 4),
+ CH_DEVICE(0x5415, 4),
{ 0, }
};
@@ -432,6 +436,9 @@ static void link_report(struct net_device *dev)
case SPEED_100:
s = "100Mbps";
break;
+ case 40000: /* Need a SPEED_40000 in ethtool.h */
+ s = "40Gbps";
+ break;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2068,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x40200, 0x40298,
0x402ac, 0x4033c,
0x403f8, 0x403fc,
- 0x41300, 0x413c4,
+ 0x41304, 0x413c4,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x44078,
@@ -2089,7 +2096,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x48200, 0x48298,
0x482ac, 0x4833c,
0x483f8, 0x483fc,
- 0x49300, 0x493c4,
+ 0x49304, 0x493c4,
0x49400, 0x4941c,
0x49480, 0x494d0,
0x4c000, 0x4c078,
@@ -2199,6 +2206,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
+ else if (type == FW_PORT_TYPE_BP40_BA)
+ v |= SUPPORTED_40000baseSR4_Full;
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
@@ -2215,6 +2224,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
v |= FW_PORT_CAP_SPEED_1G;
if (caps & ADVERTISED_10000baseT_Full)
v |= FW_PORT_CAP_SPEED_10G;
+ if (caps & ADVERTISED_40000baseSR4_Full)
+ v |= FW_PORT_CAP_SPEED_40G;
return v;
}
@@ -2269,6 +2280,8 @@ static unsigned int speed_to_caps(int speed)
return FW_PORT_CAP_SPEED_1G;
if (speed == SPEED_10000)
return FW_PORT_CAP_SPEED_10G;
+ if (speed == 40000) /* Need SPEED_40000 in ethtool.h */
+ return FW_PORT_CAP_SPEED_40G;
return 0;
}
@@ -2296,8 +2309,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) || (speed == SPEED_1000) ||
- (speed == SPEED_10000))
+ if (!(lc->supported & cap) ||
+ (speed == SPEED_1000) ||
+ (speed == SPEED_10000) ||
+ (speed == 40000))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -3765,6 +3780,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_pktshift = adap->sge.pktshift;
lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -5370,6 +5386,21 @@ static int adap_init0(struct adapter *adap)
(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
/*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL parameter, so we'll interpret a query failure as no
+ * permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
@@ -5603,9 +5634,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
-static inline bool is_10g_port(const struct link_config *lc)
+static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5661,7 @@ static void cfg_queues(struct adapter *adap)
int i, q10g = 0, n10g = 0, qidx = 0;
for_each_port(adap, i)
- n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5676,7 @@ static void cfg_queues(struct adapter *adap)
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
@@ -5737,7 +5769,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
- int i, err, want, need;
+ int i, want, need;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5785,30 @@ static int enable_msix(struct adapter *adap)
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
- while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
- want = err;
+ want = pci_enable_msix_range(adap->pdev, entries, need, want);
+ if (want < 0)
+ return want;
- if (!err) {
- /*
- * Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = want - EXTRA_VECS - ofld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
- }
- if (is_offload(adap)) {
- i = want - EXTRA_VECS - s->max_ethqsets;
- i -= ofld_need - nchan;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
- }
- for (i = 0; i < want; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- } else if (err > 0)
- dev_info(adap->pdev_dev,
- "only %d MSI-X vectors left, not using MSI-X\n", err);
- return err;
+ /*
+ * Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
#undef EXTRA_VECS
@@ -5801,11 +5831,6 @@ static int init_rss(struct adapter *adap)
static void print_port_info(const struct net_device *dev)
{
- static const char *base[] = {
- "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
- "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
- };
-
char buf[80];
char *bufp = buf;
const char *spd = "";
@@ -5823,9 +5848,11 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
if (bufp != buf)
--bufp;
- sprintf(bufp, "BASE-%s", base[pi->port_type]);
+ sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
@@ -5833,8 +5860,8 @@ static void print_port_info(const struct net_device *dev)
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, E/C: %s\n",
- adap->params.vpd.sn, adap->params.vpd.ec);
+ netdev_info(dev, "S/N: %s, P/N: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4dd0a82533e4..e274a047528f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
/* packet data */
bool enable_fw_ofld_conn; /* Enable connection through fw */
/* WR */
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 47ffa64fcf19..af76b25bb606 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -706,11 +706,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
* @skb: the packet
*
* Returns whether an Ethernet packet is small enough to fit as
- * immediate data.
+ * immediate data. Return value corresponds to headroom required.
*/
static inline int is_eth_imm(const struct sk_buff *skb)
{
- return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+ int hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
}
/**
@@ -723,9 +729,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
unsigned int flits;
+ int hdrlen = is_eth_imm(skb);
- if (is_eth_imm(skb))
- return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
if (skb_shinfo(skb)->gso_size)
@@ -971,6 +978,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -982,6 +990,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
/*
* The chip min packet length is 10 octets but play safe and reject
@@ -1011,7 +1020,10 @@ out_free: dev_kfree_skb(skb);
return NETDEV_TX_BUSY;
}
- if (!is_eth_imm(skb) &&
+ if (is_eth_imm(skb))
+ immediate = true;
+
+ if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
goto out_free;
@@ -1028,6 +1040,8 @@ out_free: dev_kfree_skb(skb);
wr->r3 = cpu_to_be64(0);
end = (u64 *)wr + flits;
+ len = immediate ? skb->len : 0;
+ len += sizeof(*cpl);
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1049,9 @@ out_free: dev_kfree_skb(skb);
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(sizeof(*lso)));
+ FW_WR_IMMDLEN(len));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
@@ -1054,9 +1069,6 @@ out_free: dev_kfree_skb(skb);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
- int len;
-
- len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(len));
cpl = (void *)(wr + 1);
@@ -1078,7 +1090,7 @@ out_free: dev_kfree_skb(skb);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
- if (is_eth_imm(skb)) {
+ if (immediate) {
inline_tx_skb(skb, &q->q, cpl + 1);
dev_kfree_skb(skb);
} else {
@@ -1467,8 +1479,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
unsigned int idx = skb_txq(skb);
- if (unlikely(is_ctrl_pkt(skb)))
+ if (unlikely(is_ctrl_pkt(skb))) {
+ /* Single ctrl queue is a requirement for LE workaround path */
+ if (adap->tids.nsftids)
+ idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ }
return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2c109343d570..7ae756defc95 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
u32 cclk_param, cclk_val;
int i, ret, addr;
- int ec, sn;
+ int ec, sn, pn;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ strim(p->pn);
/*
* Ask firmware for the Core Clock since it knows how to translate the
@@ -1155,7 +1158,8 @@ out:
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_ANEG)
/**
* t4_link_start - apply link configuration to MAC/PHY
@@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
}
/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char *const port_type_description[] = {
+ "R XFI",
+ "R XAUI",
+ "T SGMII",
+ "T XFI",
+ "T XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "R SFP+",
+ "KR/KX",
+ "KR/KX/KX4",
+ "R QSFP_10G",
+ "",
+ "R QSFP",
+ "R BP40_BA",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -3538,6 +3572,8 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
speed = SPEED_1000;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
speed = SPEED_10000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = 40000; /* Need SPEED_40000 in ethtool.h */
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) { /* something changed */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 74fea74ce0aa..9cc973fbcf26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
/*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
FW_PORT_TYPE_SFP,
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
+ FW_PORT_TYPE_QSFP_10G,
+ FW_PORT_TYPE_QSFP,
+ FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0899c0983594..1d0fe9b60312 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
*/
static int enable_msix(struct adapter *adapter)
{
- int i, err, want, need;
+ int i, want, need, nqsets;
struct msix_entry entries[MSIX_ENTRIES];
struct sge *s = &adapter->sge;
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
*/
want = s->max_ethqsets + MSIX_EXTRAS;
need = adapter->params.nports + MSIX_EXTRAS;
- while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
- want = err;
- if (err == 0) {
- int nqsets = want - MSIX_EXTRAS;
- if (nqsets < s->max_ethqsets) {
- dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
- " for %d Queue Sets\n", nqsets);
- s->max_ethqsets = nqsets;
- if (nqsets < s->ethqsets)
- reduce_ethqs(adapter, nqsets);
- }
- for (i = 0; i < want; ++i)
- adapter->msix_info[i].vec = entries[i].vector;
- } else if (err > 0) {
- pci_disable_msix(adapter->pdev);
- dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
- " not using MSI-X\n", err);
+ want = pci_enable_msix_range(adapter->pdev, entries, need, want);
+ if (want < 0)
+ return want;
+
+ nqsets = want - MSIX_EXTRAS;
+ if (nqsets < s->max_ethqsets) {
+ dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+ " for %d Queue Sets\n", nqsets);
+ s->max_ethqsets = nqsets;
+ if (nqsets < s->ethqsets)
+ reduce_ethqs(adapter, nqsets);
}
- return err;
+ for (i = 0; i < want; ++i)
+ adapter->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
static const struct net_device_ops cxgb4vf_netdev_ops = {
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b740bfce72ef..dcd58f23834a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1796,7 +1796,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->cq_count >= n + m &&
enic->intr_count >= n + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ n + m + 2, n + m + 2) > 0) {
enic->rq_count = n;
enic->wq_count = m;
@@ -1815,7 +1816,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->wq_count >= m &&
enic->cq_count >= 1 + m &&
enic->intr_count >= 1 + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ 1 + m + 2, 1 + m + 2) > 0) {
enic->rq_count = 1;
enic->wq_count = m;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index add05f14b38b..1642de78aac8 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
+ pci_disable_device(pdev);
/* pci_power_off (pdev, -1); */
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d09615da585..a150401a6cb3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64
-#define BE_UMC_NUM_VLANS_SUPPORTED 15
#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
@@ -293,7 +292,7 @@ struct be_rx_compl_info {
u8 ip_csum;
u8 l4_csum;
u8 ipv6;
- u8 vtm;
+ u8 qnq;
u8 pkt_type;
u8 ip_frag;
};
@@ -465,6 +464,7 @@ struct be_adapter {
u32 port_num;
bool promiscuous;
+ u8 mc_type;
u32 function_mode;
u32 function_caps;
u32 rx_fc; /* Rx flow control */
@@ -534,6 +534,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
return min_t(u16, num, num_online_cpus());
}
+/* Is BE in pvid_tagging mode */
+#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
+
+/* Is BE in QNQ multi-channel mode */
+#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \
+ adapter->mc_type == vNIC1 || \
+ adapter->mc_type == UFP)
+
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 48076a6370c3..72bde5d1c358 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -3296,6 +3296,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
return NULL;
}
+static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
+ return (struct be_port_res_desc *)hdr;
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return NULL;
+}
+
static void be_copy_nic_desc(struct be_resources *res,
struct be_nic_res_desc *desc)
{
@@ -3439,6 +3454,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
{
struct be_cmd_resp_get_profile_config *resp;
struct be_pcie_res_desc *pcie;
+ struct be_port_res_desc *port;
struct be_nic_res_desc *nic;
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_dma_mem cmd;
@@ -3466,6 +3482,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
+ port = be_get_port_desc(resp->func_param, desc_count);
+ if (port)
+ adapter->mc_type = port->mc_type;
+
nic = be_get_nic_desc(resp->func_param, desc_count);
if (nic)
be_copy_nic_desc(res, nic);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index fc4e076dc202..d0ab980f77ea 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -1098,14 +1098,6 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter)
-{
- return adapter->function_mode & FLEX10_MODE ||
- adapter->function_mode & VNIC_MODE ||
- adapter->function_mode & UMC_ENABLED;
-}
-
/******************** RSS Config ****************************************/
/* RSS type Input parameters used to compute RX hash
* RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1828,6 +1820,7 @@ struct be_cmd_req_set_ext_fat_caps {
#define NIC_RESOURCE_DESC_TYPE_V0 0x41
#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
#define NIC_RESOURCE_DESC_TYPE_V1 0x51
+#define PORT_RESOURCE_DESC_TYPE_V1 0x55
#define MAX_RESOURCE_DESC 264
/* QOS unit number */
@@ -1891,6 +1884,33 @@ struct be_nic_res_desc {
u32 rsvd8[7];
} __packed;
+/************ Multi-Channel type ***********/
+enum mc_type {
+ MC_NONE = 0x01,
+ UMC = 0x02,
+ FLEX10 = 0x03,
+ vNIC1 = 0x04,
+ nPAR = 0x05,
+ UFP = 0x06,
+ vNIC2 = 0x07
+};
+
+struct be_port_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u8 rsvd1;
+ u8 mc_type;
+ u16 rsvd2;
+ u32 rsvd3[20];
+} __packed;
+
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->mc_type > MC_NONE;
+}
+
struct be_cmd_req_get_func_config {
struct be_cmd_req_hdr hdr;
};
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 05be0070f55f..cf09d8faca84 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index dc88782185f2..28ac8dd0beaa 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 rsvd1[23]; /* dword 2 */
u8 lro_pkt; /* dword 2 */
@@ -401,7 +401,7 @@ struct amap_eth_rx_compl_v1 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 port[2]; /* dword 2 */
u8 vntagp; /* dword 2 */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 04ac9c6a0d39..4f87f5c0b03c 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -945,9 +945,9 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
- * tagging in UMC mode
+ * tagging in pvid-tagging mode
*/
- if ((adapter->function_mode & UMC_ENABLED) &&
+ if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
*skip_hw_vlan = true;
@@ -1660,7 +1660,7 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
compl);
@@ -1690,7 +1690,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
compl);
@@ -1723,9 +1723,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->l4_csum = 0;
if (rxcp->vlanf) {
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
+ /* In QNQ modes, if the qnq bit is not set, then the packet was
+ * tagged only with the transparent outer vlan-tag and must
+ * not be treated as a vlan packet by the host
+ */
+ if (be_is_qnq_mode(adapter) && !rxcp->qnq)
rxcp->vlanf = 0;
if (!lancer_chip(adapter))
@@ -2423,6 +2425,9 @@ void be_detect_error(struct be_adapter *adapter)
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ bool error_detected = false;
+ struct device *dev = &adapter->pdev->dev;
+ struct net_device *netdev = adapter->netdev;
if (be_hw_error(adapter))
return;
@@ -2434,6 +2439,21 @@ void be_detect_error(struct be_adapter *adapter)
SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
+ adapter->hw_error = true;
+ /* Do not log error messages if it's a FW reset */
+ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+ sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+ dev_info(dev, "Firmware update in progress\n");
+ } else {
+ error_detected = true;
+ dev_err(dev, "Error detected in the card\n");
+ dev_err(dev, "ERR: sliport status 0x%x\n",
+ sliport_status);
+ dev_err(dev, "ERR: sliport error1 0x%x\n",
+ sliport_err1);
+ dev_err(dev, "ERR: sliport error2 0x%x\n",
+ sliport_err2);
+ }
}
} else {
pci_read_config_dword(adapter->pdev,
@@ -2447,51 +2467,33 @@ void be_detect_error(struct be_adapter *adapter)
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
- }
-
- /* On certain platforms BE hardware can indicate spurious UEs.
- * Allow the h/w to stop working completely in case of a real UE.
- * Hence not setting the hw_error for UE detection.
- */
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->hw_error = true;
- /* Do not log error messages if its a FW reset */
- if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
- sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(&adapter->pdev->dev,
- "Firmware update in progress\n");
- return;
- } else {
- dev_err(&adapter->pdev->dev,
- "Error detected in the card\n");
- }
- }
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "ERR: sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error2 0x%x\n", sliport_err2);
- }
-
- if (ue_lo) {
- for (i = 0; ue_lo; ue_lo >>= 1, i++) {
- if (ue_lo & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_low_desc[i]);
- }
- }
+ /* On certain platforms BE hardware can indicate spurious UEs.
+ * Allow HW to stop working completely in case of a real UE.
+ * Hence not setting the hw_error for UE detection.
+ */
- if (ue_hi) {
- for (i = 0; ue_hi; ue_hi >>= 1, i++) {
- if (ue_hi & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_hi_desc[i]);
+ if (ue_lo || ue_hi) {
+ error_detected = true;
+ dev_err(dev,
+ "Unrecoverable Error detected in the adapter");
+ dev_err(dev, "Please reboot server to recover");
+ if (skyhawk_chip(adapter))
+ adapter->hw_error = true;
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_low_desc[i]);
+ }
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_hi_desc[i]);
+ }
}
}
-
+ if (error_detected)
+ netif_carrier_off(netdev);
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2505,7 +2507,7 @@ static void be_msix_disable(struct be_adapter *adapter)
static int be_msix_enable(struct be_adapter *adapter)
{
- int i, status, num_vec;
+ int i, num_vec;
struct device *dev = &adapter->pdev->dev;
/* If RoCE is supported, program the max number of NIC vectors that
@@ -2521,24 +2523,11 @@ static int be_msix_enable(struct be_adapter *adapter)
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
- if (status == 0) {
- goto done;
- } else if (status >= MIN_MSIX_VECTORS) {
- num_vec = status;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- num_vec);
- if (!status)
- goto done;
- }
-
- dev_warn(dev, "MSIx enable failed\n");
+ num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ MIN_MSIX_VECTORS, num_vec);
+ if (num_vec < 0)
+ goto fail;
- /* INTx is not supported in VFs, so fail probe if enable_msix fails */
- if (!be_physfn(adapter))
- return status;
- return 0;
-done:
if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
adapter->num_msix_roce_vec = num_vec / 2;
dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
@@ -2550,6 +2539,14 @@ done:
dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
adapter->num_msix_vec);
return 0;
+
+fail:
+ dev_warn(dev, "MSIx enable failed\n");
+
+ /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+ if (!be_physfn(adapter))
+ return num_vec;
+ return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -3109,6 +3106,22 @@ err:
return status;
}
+/* Convert function_mode bits on BE3 to SH mc_type enums */
+
+static u8 be_convert_mc_type(u32 function_mode)
+{
+ if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+ return vNIC1;
+ else if (function_mode & FLEX10_MODE)
+ return FLEX10;
+ else if (function_mode & VNIC_MODE)
+ return vNIC2;
+ else if (function_mode & UMC_ENABLED)
+ return UMC;
+ else
+ return MC_NONE;
+}
+
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
struct be_resources *res)
@@ -3129,12 +3142,23 @@ static void BEx_get_resources(struct be_adapter *adapter,
else
res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
- if (adapter->function_mode & FLEX10_MODE)
- res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else if (adapter->function_mode & UMC_ENABLED)
- res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
- else
+ adapter->mc_type = be_convert_mc_type(adapter->function_mode);
+
+ if (be_is_mc(adapter)) {
+ /* Assuming that there are 4 channels per port
+ * when multi-channel is enabled
+ */
+ if (be_is_qnq_mode(adapter))
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ /* In a non-qnq multichannel mode, the pvid
+ * takes up one vlan entry
+ */
+ res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
+ } else {
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ }
+
res->max_mcast_mac = BE_MAX_MC;
/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
@@ -4417,14 +4441,32 @@ static bool be_reset_required(struct be_adapter *adapter)
static char *mc_name(struct be_adapter *adapter)
{
- if (adapter->function_mode & FLEX10_MODE)
- return "FLEX10";
- else if (adapter->function_mode & VNIC_MODE)
- return "vNIC";
- else if (adapter->function_mode & UMC_ENABLED)
- return "UMC";
- else
- return "";
+ char *str = ""; /* default */
+
+ switch (adapter->mc_type) {
+ case UMC:
+ str = "UMC";
+ break;
+ case FLEX10:
+ str = "FLEX10";
+ break;
+ case vNIC1:
+ str = "vNIC-1";
+ break;
+ case nPAR:
+ str = "nPAR";
+ break;
+ case UFP:
+ str = "UFP";
+ break;
+ case vNIC2:
+ str = "vNIC-2";
+ break;
+ default:
+ str = "";
+ }
+
+ return str;
}
static inline char *func_name(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 9cd5415fe017..a5dae4a62bb3 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 2cd1129e19af..a3ef8f804b9e 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 549ce13b92ac..71debd1c18c9 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o \
- gianfar_sysfs.o
+ gianfar_ethtool.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4782b42401b..903362a7b584 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1778,8 +1778,6 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
- napi_enable(&fep->napi);
-
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1794,6 +1792,8 @@ fec_enet_open(struct net_device *ndev)
fec_enet_free_buffers(ndev);
return ret;
}
+
+ napi_enable(&fep->napi);
phy_start(fep->phy_dev);
netif_start_queue(ndev);
fep->opened = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ad5a5aadc7e1..a2977a8df645 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
* Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -138,9 +138,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
-void gfar_halt(struct net_device *dev);
-static void gfar_halt_nodisable(struct net_device *dev);
-void gfar_start(struct net_device *dev);
+static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
const u8 *addr);
@@ -338,7 +336,6 @@ static void gfar_init_mac(struct net_device *ndev)
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 rctrl = 0;
u32 tctrl = 0;
- u32 attrs = 0;
/* write the tx/rx base registers */
gfar_init_tx_rx_base(priv);
@@ -376,13 +373,6 @@ static void gfar_init_mac(struct net_device *ndev)
rctrl |= RCTRL_PADDING(priv->padding);
}
- /* Insert receive time stamps into padding alignment bytes */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
- rctrl &= ~RCTRL_PAL_MASK;
- rctrl |= RCTRL_PADDING(8);
- priv->padding = 8;
- }
-
/* Enable HW time stamping if requested from user space */
if (priv->hwts_rx_en) {
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
@@ -409,29 +399,6 @@ static void gfar_init_mac(struct net_device *ndev)
}
gfar_write(&regs->tctrl, tctrl);
-
- /* Set the extraction length and index */
- attrs = ATTRELI_EL(priv->rx_stash_size) |
- ATTRELI_EI(priv->rx_stash_index);
-
- gfar_write(&regs->attreli, attrs);
-
- /* Start with defaults, and add stashing or locking
- * depending on the approprate variables
- */
- attrs = ATTR_INIT_SETTINGS;
-
- if (priv->bd_stash_en)
- attrs |= ATTR_BDSTASH;
-
- if (priv->rx_stash_size != 0)
- attrs |= ATTR_BUFSTASH;
-
- gfar_write(&regs->attr, attrs);
-
- gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
- gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
- gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
@@ -479,6 +446,29 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
+static void gfar_ints_disable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Initialize IMASK */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+}
+
+static void gfar_ints_enable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
+}
+
void lock_rx_qs(struct gfar_private *priv)
{
int i;
@@ -511,7 +501,43 @@ void unlock_tx_qs(struct gfar_private *priv)
spin_unlock(&priv->tx_queue[i]->txlock);
}
-static void free_tx_pointers(struct gfar_private *priv)
+static int gfar_alloc_tx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i])
+ return -ENOMEM;
+
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = priv->ndev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
+ return 0;
+}
+
+static int gfar_alloc_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i])
+ return -ENOMEM;
+
+ priv->rx_queue[i]->rx_skbuff = NULL;
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->dev = priv->ndev;
+ spin_lock_init(&(priv->rx_queue[i]->rxlock));
+ }
+ return 0;
+}
+
+static void gfar_free_tx_queues(struct gfar_private *priv)
{
int i;
@@ -519,7 +545,7 @@ static void free_tx_pointers(struct gfar_private *priv)
kfree(priv->tx_queue[i]);
}
-static void free_rx_pointers(struct gfar_private *priv)
+static void gfar_free_rx_queues(struct gfar_private *priv)
{
int i;
@@ -608,6 +634,30 @@ static int gfar_parse_group(struct device_node *np,
grp->rx_bit_map = 0xFF;
grp->tx_bit_map = 0xFF;
}
+
+ /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
+ * from right to left, so we need to reverse the 8 bits to get the q index
+ */
+ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
+ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups
+ */
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ grp->num_rx_queues++;
+ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
+ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ priv->rx_queue[i]->grp = grp;
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ grp->num_tx_queues++;
+ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
+ priv->tqueue |= (TQUEUE_EN0 >> i);
+ priv->tx_queue[i]->grp = grp;
+ }
+
priv->num_grps++;
return 0;
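The bitmap handling above leans on bitrev8() from <linux/bitrev.h>, which replaces the open-coded reverse_bitmap() helper removed later in this patch. Below is a minimal, illustrative sketch of why the reversal is needed, assuming the 8-bit device convention described in the comment (MSB = q0); the function name is made up.

#include <linux/bitrev.h>
#include <linux/bitops.h>
#include <linux/printk.h>

static void example_parse_bitmap(void)
{
	/* Device convention: MSB is q0, so "only q0 enabled" is 0x80. */
	unsigned long rx_bit_map = bitrev8(0x80);	/* 0x80 -> 0x01 */
	unsigned int i;

	/* for_each_set_bit() scans from bit 0 upward, so after the
	 * reversal bit i corresponds directly to queue index i
	 * (here it reports queue 0 only).
	 */
	for_each_set_bit(i, &rx_bit_map, 8)
		pr_info("queue %u belongs to this group\n", i);
}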
@@ -664,7 +714,14 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->num_tx_queues = num_tx_qs;
netif_set_real_num_rx_queues(dev, num_rx_qs);
priv->num_rx_queues = num_rx_qs;
- priv->num_grps = 0x0;
+
+ err = gfar_alloc_tx_queues(priv);
+ if (err)
+ goto tx_alloc_failed;
+
+ err = gfar_alloc_rx_queues(priv);
+ if (err)
+ goto rx_alloc_failed;
/* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
@@ -691,38 +748,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
goto err_grp_init;
}
- for (i = 0; i < priv->num_tx_queues; i++)
- priv->tx_queue[i] = NULL;
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i] = NULL;
-
- for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
- GFP_KERNEL);
- if (!priv->tx_queue[i]) {
- err = -ENOMEM;
- goto tx_alloc_failed;
- }
- priv->tx_queue[i]->tx_skbuff = NULL;
- priv->tx_queue[i]->qindex = i;
- priv->tx_queue[i]->dev = dev;
- spin_lock_init(&(priv->tx_queue[i]->txlock));
- }
-
- for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
- GFP_KERNEL);
- if (!priv->rx_queue[i]) {
- err = -ENOMEM;
- goto rx_alloc_failed;
- }
- priv->rx_queue[i]->rx_skbuff = NULL;
- priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = dev;
- spin_lock_init(&(priv->rx_queue[i]->rxlock));
- }
-
-
stash = of_get_property(np, "bd-stash", NULL);
if (stash) {
@@ -749,17 +774,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR;
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
@@ -784,12 +808,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return 0;
-rx_alloc_failed:
- free_rx_pointers(priv);
-tx_alloc_failed:
- free_tx_pointers(priv);
err_grp_init:
unmap_group_regs(priv);
+rx_alloc_failed:
+ gfar_free_rx_queues(priv);
+tx_alloc_failed:
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return err;
}
@@ -875,19 +899,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(priv->phydev, rq, cmd);
}
-static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
-{
- unsigned int new_bit_map = 0x0;
- int mask = 0x1 << (max_qs - 1), i;
-
- for (i = 0; i < max_qs; i++) {
- if (bit_map & mask)
- new_bit_map = new_bit_map + (1 << i);
- mask = mask >> 0x1;
- }
- return new_bit_map;
-}
-
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
u32 class)
{
@@ -1005,43 +1016,10 @@ static void gfar_detect_errata(struct gfar_private *priv)
priv->errata);
}
-/* Set up the ethernet device structure, private data,
- * and anything else we need before we start
- */
-static int gfar_probe(struct platform_device *ofdev)
+static void gfar_hw_init(struct gfar_private *priv)
{
- u32 tempval;
- struct net_device *dev = NULL;
- struct gfar_private *priv = NULL;
- struct gfar __iomem *regs = NULL;
- int err = 0, i, grp_idx = 0;
- u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
- u32 isrg = 0;
- u32 __iomem *baddr;
-
- err = gfar_of_init(ofdev, &dev);
-
- if (err)
- return err;
-
- priv = netdev_priv(dev);
- priv->ndev = dev;
- priv->ofdev = ofdev;
- priv->dev = &ofdev->dev;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- spin_lock_init(&priv->bflock);
- INIT_WORK(&priv->reset_task, gfar_reset_task);
-
- platform_set_drvdata(ofdev, priv);
- regs = priv->gfargrp[0].regs;
-
- gfar_detect_errata(priv);
-
- /* Stop the DMA engine now, in case it was running before
- * (The firmware could have used it, and left it running).
- */
- gfar_halt(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval, attrs;
/* Reset MAC layer */
gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
@@ -1049,15 +1027,10 @@ static int gfar_probe(struct platform_device *ofdev)
/* We need to delay at least 3 TX clocks */
udelay(2);
- tempval = 0;
- if (!priv->pause_aneg_en && priv->tx_pause_en)
- tempval |= MACCFG1_TX_FLOW;
- if (!priv->pause_aneg_en && priv->rx_pause_en)
- tempval |= MACCFG1_RX_FLOW;
/* the soft reset bit is not self-resetting, so we need to
* clear it before resuming normal operation
*/
- gfar_write(&regs->maccfg1, tempval);
+ gfar_write(&regs->maccfg1, 0);
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
@@ -1068,36 +1041,38 @@ static int gfar_probe(struct platform_device *ofdev)
/* Initialize ECNTRL */
gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
- /* Set the dev->base_addr to the gfar reg region */
- dev->base_addr = (unsigned long) regs;
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
- /* Fill in the dev structure */
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->mtu = 1500;
- dev->netdev_ops = &gfar_netdev_ops;
- dev->ethtool_ops = &gfar_ethtool_ops;
+ gfar_write(&regs->attreli, attrs);
- /* Register for napi ...We are registering NAPI for each grp */
- if (priv->mode == SQ_SG_MODE)
- netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
- GFAR_DEV_WEIGHT);
- else
- for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
- GFAR_DEV_WEIGHT);
+ /* Start with defaults, and add stashing
+ * depending on driver parameters
+ */
+ attrs = ATTR_INIT_SETTINGS;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
- dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
- }
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ /* FIFO configs */
+ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
+ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
+ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
+
+ /* Program the interrupt steering regs, only for MG devices */
+ if (priv->num_grps > 1)
+ gfar_write_isrg(priv);
+}
+
+static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
priv->extended_hash = 1;
@@ -1133,68 +1108,82 @@ static int gfar_probe(struct platform_device *ofdev)
priv->hash_regs[6] = &regs->gaddr6;
priv->hash_regs[7] = &regs->gaddr7;
}
+}
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
- priv->padding = DEFAULT_PADDING;
- else
- priv->padding = 0;
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start
+ */
+static int gfar_probe(struct platform_device *ofdev)
+{
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ int err = 0, i;
- if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ err = gfar_of_init(ofdev, &dev);
- /* Program the isrg regs only if number of grps > 1 */
- if (priv->num_grps > 1) {
- baddr = &regs->isrg0;
- for (i = 0; i < priv->num_grps; i++) {
- isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
- isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
- gfar_write(baddr, isrg);
- baddr++;
- isrg = 0x0;
- }
- }
+ if (err)
+ return err;
- /* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
+ priv->dev = &ofdev->dev;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ spin_lock_init(&priv->bflock);
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
+
+ platform_set_drvdata(ofdev, priv);
+
+ gfar_detect_errata(priv);
+
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
*/
- for (i = 0; i< priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map =
- reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map =
- reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ gfar_halt(priv);
+
+ gfar_hw_init(priv);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
+
+ /* Fill in the dev structure */
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->mtu = 1500;
+ dev->netdev_ops = &gfar_netdev_ops;
+ dev->ethtool_ops = &gfar_ethtool_ops;
+
+ /* Register for napi ...We are registering NAPI for each grp */
+ if (priv->mode == SQ_SG_MODE)
+ netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
+ GFAR_DEV_WEIGHT);
+ else
+ for (i = 0; i < priv->num_grps; i++)
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+ GFAR_DEV_WEIGHT);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
- /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups
- */
- for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
- priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
- priv->gfargrp[grp_idx].num_rx_queues++;
- priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
- rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
- rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
- }
- priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
- priv->gfargrp[grp_idx].num_tx_queues++;
- priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
- tstat = tstat | (TSTAT_CLEAR_THALT >> i);
- tqueue = tqueue | (TQUEUE_EN0 >> i);
- }
- priv->gfargrp[grp_idx].rstat = rstat;
- priv->gfargrp[grp_idx].tstat = tstat;
- rstat = tstat =0;
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- gfar_write(&regs->rqueue, rqueue);
- gfar_write(&regs->tqueue, tqueue);
+ gfar_init_addr_hash_table(priv);
+
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->padding = 8;
+
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ dev->needed_headroom = GMAC_FCB_LEN;
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
@@ -1251,9 +1240,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Initialize the filer table */
gfar_init_filer_table(priv);
- /* Create all the sysfs files */
- gfar_init_sysfs(dev);
-
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
@@ -1272,8 +1258,8 @@ static int gfar_probe(struct platform_device *ofdev)
register_fail:
unmap_group_regs(priv);
- free_tx_pointers(priv);
- free_rx_pointers(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
if (priv->phy_node)
of_node_put(priv->phy_node);
if (priv->tbi_node)
@@ -1293,6 +1279,8 @@ static int gfar_remove(struct platform_device *ofdev)
unregister_netdev(priv->ndev);
unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return 0;
@@ -1320,7 +1308,7 @@ static int gfar_suspend(struct device *dev)
lock_tx_qs(priv);
lock_rx_qs(priv);
- gfar_halt_nodisable(ndev);
+ gfar_halt_nodisable(priv);
/* Disable Tx, and Rx if wake-on-LAN is disabled. */
tempval = gfar_read(&regs->maccfg1);
@@ -1384,7 +1372,7 @@ static int gfar_resume(struct device *dev)
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
- gfar_start(ndev);
+ gfar_start(priv);
unlock_rx_qs(priv);
unlock_tx_qs(priv);
@@ -1416,7 +1404,7 @@ static int gfar_restore(struct device *dev)
init_registers(ndev);
gfar_set_mac_address(ndev);
gfar_init_mac(ndev);
- gfar_start(ndev);
+ gfar_start(priv);
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1577,19 +1565,10 @@ static void gfar_configure_serdes(struct net_device *dev)
static void init_registers(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
- int i;
-
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Clear IEVENT */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
- /* Initialize IMASK */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
- }
+ gfar_ints_disable(priv);
- regs = priv->gfargrp[0].regs;
/* Init hash registers to zero */
gfar_write(&regs->igaddr0, 0);
gfar_write(&regs->igaddr1, 0);
@@ -1648,23 +1627,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
}
/* Halt the receive and transmit queues */
-static void gfar_halt_nodisable(struct net_device *dev)
+static void gfar_halt_nodisable(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int i;
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Mask all interrupts */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
-
- /* Clear all interrupts */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
- }
+ gfar_ints_disable(priv);
- regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
@@ -1685,15 +1654,20 @@ static void gfar_halt_nodisable(struct net_device *dev)
}
/* Halt the receive and transmit queues */
-void gfar_halt(struct net_device *dev)
+void gfar_halt(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- gfar_halt_nodisable(dev);
+ /* Disable the Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, 0);
+ gfar_write(&regs->tqueue, 0);
+
+ mdelay(10);
- /* Disable Rx and Tx */
+ gfar_halt_nodisable(priv);
+
+ /* Disable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
@@ -1720,7 +1694,7 @@ void stop_gfar(struct net_device *dev)
lock_tx_qs(priv);
lock_rx_qs(priv);
- gfar_halt(dev);
+ gfar_halt(priv);
unlock_rx_qs(priv);
unlock_tx_qs(priv);
@@ -1825,17 +1799,15 @@ static void free_skb_resources(struct gfar_private *priv)
priv->tx_queue[0]->tx_bd_dma_base);
}
-void gfar_start(struct net_device *dev)
+void gfar_start(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
int i = 0;
- /* Enable Rx and Tx in MACCFG1 */
- tempval = gfar_read(&regs->maccfg1);
- tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
- gfar_write(&regs->maccfg1, tempval);
+ /* Enable Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+ gfar_write(&regs->tqueue, priv->tqueue);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
@@ -1852,11 +1824,16 @@ void gfar_start(struct net_device *dev)
/* Clear THLT/RHLT, so that the DMA starts polling now */
gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
- /* Unmask the interrupts we look for */
- gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Enable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ gfar_ints_enable(priv);
+
+ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}
static void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1960,15 +1937,10 @@ err_irq_fail:
int startup_gfar(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = NULL;
int err, i, j;
- for (i = 0; i < priv->num_grps; i++) {
- regs= priv->gfargrp[i].regs;
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
- }
+ gfar_ints_disable(priv);
- regs= priv->gfargrp[0].regs;
err = gfar_alloc_skb_resources(ndev);
if (err)
return err;
@@ -1985,7 +1957,7 @@ int startup_gfar(struct net_device *ndev)
}
/* Start the controller */
- gfar_start(ndev);
+ gfar_start(priv);
phy_start(priv->phydev);
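
The gianfar.c changes above split gfar_probe() into gfar_hw_init()/gfar_init_addr_hash_table() and teach gfar_halt()/gfar_start() to disable and re-enable the Rx/Tx hardware queues directly. The queue-enable masks now live in priv->rqueue/priv->tqueue (filled in elsewhere when queues are mapped to groups; that code is not in these hunks), so halt/start no longer re-derive them from the queue bit maps. The stand-alone sketch below models that caching; the RQUEUE_EN0/RQUEUE_EX0/TQUEUE_EN0 values and the two-queue layout are placeholders, not the driver's real register encodings.

#include <stdint.h>
#include <stdio.h>

#define RQUEUE_EX0  0x00800000u   /* placeholder value, illustration only */
#define RQUEUE_EN0  0x00000080u   /* placeholder value, illustration only */
#define TQUEUE_EN0  0x00008000u   /* placeholder value, illustration only */

struct model {
    uint32_t rqueue;              /* cached mask, like priv->rqueue */
    uint32_t tqueue;              /* cached mask, like priv->tqueue */
    uint32_t rqueue_reg;          /* stand-in for the RQUEUE register */
    uint32_t tqueue_reg;          /* stand-in for the TQUEUE register */
};

static void model_init(struct model *m, int num_rx, int num_tx)
{
    int i;

    m->rqueue = m->tqueue = 0;
    for (i = 0; i < num_rx; i++)
        m->rqueue |= (RQUEUE_EN0 | RQUEUE_EX0) >> i;
    for (i = 0; i < num_tx; i++)
        m->tqueue |= TQUEUE_EN0 >> i;
}

static void model_halt(struct model *m)
{
    /* like gfar_halt(): simply zero the queue-enable registers */
    m->rqueue_reg = 0;
    m->tqueue_reg = 0;
}

static void model_start(struct model *m)
{
    /* like gfar_start(): restore the cached masks instead of recomputing */
    m->rqueue_reg = m->rqueue;
    m->tqueue_reg = m->tqueue;
}

int main(void)
{
    struct model m = { 0 };

    model_init(&m, 2, 2);
    model_start(&m);
    printf("started: rqueue=0x%08x tqueue=0x%08x\n",
           (unsigned)m.rqueue_reg, (unsigned)m.tqueue_reg);
    model_halt(&m);
    printf("halted:  rqueue=0x%08x tqueue=0x%08x\n",
           (unsigned)m.rqueue_reg, (unsigned)m.tqueue_reg);
    return 0;
}
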
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 52bb2b0195cc..2a59398f8cf0 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -880,7 +880,6 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
-#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
@@ -892,8 +891,8 @@ struct gfar {
#define DEFAULT_MAPPING 0xFF
#endif
-#define ISRG_SHIFT_TX 0x10
-#define ISRG_SHIFT_RX 0x18
+#define ISRG_RR0 0x80000000
+#define ISRG_TR0 0x00800000
/* The same driver can operate in two modes */
/* SQ_SG_MODE: Single Queue Single Group Mode
@@ -1113,6 +1112,9 @@ struct gfar_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ u32 rqueue;
+ u32 tqueue;
+
/* RX per device parameters */
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1127,11 +1129,6 @@ struct gfar_private {
u32 __iomem *hash_regs[16];
int hash_width;
- /* global parameters */
- unsigned int fifo_threshold;
- unsigned int fifo_starve;
- unsigned int fifo_starve_off;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1176,6 +1173,31 @@ static inline void gfar_read_filer(struct gfar_private *priv,
*fpr = gfar_read(&regs->rqfpr);
}
+static inline void gfar_write_isrg(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr = &regs->isrg0;
+ u32 isrg = 0;
+ int grp_idx, i;
+
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
+
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ isrg |= (ISRG_RR0 >> i);
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ isrg |= (ISRG_TR0 >> i);
+ }
+
+ gfar_write(baddr, isrg);
+
+ baddr++;
+ isrg = 0;
+ }
+}
+
void lock_rx_qs(struct gfar_private *priv);
void lock_tx_qs(struct gfar_private *priv);
void unlock_rx_qs(struct gfar_private *priv);
@@ -1183,11 +1205,11 @@ void unlock_tx_qs(struct gfar_private *priv);
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
-void gfar_halt(struct net_device *dev);
+void gfar_halt(struct gfar_private *priv);
+void gfar_start(struct gfar_private *priv);
void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
u32 regnum, u32 read);
void gfar_configure_coalescing_all(struct gfar_private *priv);
-void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
void gfar_check_rx_parser_mode(struct gfar_private *priv);
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
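
In the header above, gfar_write_isrg() replaces the old ISRG_SHIFT_TX/ISRG_SHIFT_RX scheme: each queue owned by a group contributes ISRG_RR0 or ISRG_TR0 shifted right by the queue index. Below is a minimal user-space model of that translation, assuming bit i of a group's bit map means queue i belongs to the group; the sample group layout is invented, and plain bit loops stand in for the kernel's for_each_set_bit().

#include <stdint.h>
#include <stdio.h>

#define ISRG_RR0 0x80000000u   /* from gianfar.h above */
#define ISRG_TR0 0x00800000u

/* Build one ISRGn value from a group's rx/tx queue bit maps. */
static uint32_t build_isrg(unsigned long rx_map, unsigned long tx_map,
                           int num_rx, int num_tx)
{
    uint32_t isrg = 0;
    int i;

    for (i = 0; i < num_rx; i++)
        if (rx_map & (1ul << i))
            isrg |= ISRG_RR0 >> i;

    for (i = 0; i < num_tx; i++)
        if (tx_map & (1ul << i))
            isrg |= ISRG_TR0 >> i;

    return isrg;
}

int main(void)
{
    /* example only: group 0 owns queues 0-3, group 1 owns queues 4-7 */
    printf("group0 isrg = 0x%08x\n", (unsigned)build_isrg(0x0f, 0x0f, 8, 8));
    printf("group1 isrg = 0x%08x\n", (unsigned)build_isrg(0xf0, 0xf0, 8, 8));
    return 0;
}
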
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 63d234419cc1..19557ec31f33 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -44,10 +44,6 @@
#include "gianfar.h"
-extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
- int rx_work_limit);
-
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
@@ -467,15 +463,13 @@ static void gfar_gringparam(struct net_device *dev,
}
/* Change the current ring parameters, stopping the controller if
- * necessary so that we don't mess things up while we're in
- * motion. We wait for the ring to be clean before reallocating
- * the rings.
+ * necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
- int err = 0, i = 0;
+ int err = 0, i;
if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
return -EINVAL;
@@ -493,38 +487,15 @@ static int gfar_sringparam(struct net_device *dev,
return -EINVAL;
}
-
- if (dev->flags & IFF_UP) {
- unsigned long flags;
-
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
-
- unlock_rx_qs(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
-
- /* Now we take down the rings to rebuild them */
+ if (dev->flags & IFF_UP)
stop_gfar(dev);
- }
- /* Change the size */
- for (i = 0; i < priv->num_rx_queues; i++) {
+ /* Change the sizes */
+ for (i = 0; i < priv->num_rx_queues; i++)
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree =
- priv->tx_queue[i]->tx_ring_size;
- }
/* Rebuild the rings with the new size */
if (dev->flags & IFF_UP) {
@@ -608,10 +579,8 @@ static int gfar_spauseparam(struct net_device *dev,
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
- struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err = 0, i = 0;
netdev_features_t changed = dev->features ^ features;
+ int err = 0;
if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
gfar_vlan_mode(dev, features);
@@ -620,23 +589,6 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
return 0;
if (dev->flags & IFF_UP) {
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
-
- unlock_tx_qs(priv);
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
-
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
deleted file mode 100644
index e02dd1378751..000000000000
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * drivers/net/ethernet/freescale/gianfar_sysfs.c
- *
- * Gianfar Ethernet Driver
- * This driver is designed for the non-CPM ethernet controllers
- * on the 85xx and 83xx family of integrated processors
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
- *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Sysfs file creation and management
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-
-#include "gianfar.h"
-
-static ssize_t gfar_show_bd_stash(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
-}
-
-static ssize_t gfar_set_bd_stash(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int new_setting = 0;
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
- return count;
-
-
- /* Find out the new setting */
- if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
- new_setting = 1;
- else if (!strncmp("off", buf, count - 1) ||
- !strncmp("0", buf, count - 1))
- new_setting = 0;
- else
- return count;
-
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- /* Set the new stashing value */
- priv->bd_stash_en = new_setting;
-
- temp = gfar_read(&regs->attr);
-
- if (new_setting)
- temp |= ATTR_BDSTASH;
- else
- temp &= ~(ATTR_BDSTASH);
-
- gfar_write(&regs->attr, temp);
-
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
-
-static ssize_t gfar_show_rx_stash_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_size);
-}
-
-static ssize_t gfar_set_rx_stash_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (length > priv->rx_buffer_size)
- goto out;
-
- if (length == priv->rx_stash_size)
- goto out;
-
- priv->rx_stash_size = length;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EL_MASK;
- temp |= ATTRELI_EL(length);
- gfar_write(&regs->attreli, temp);
-
- /* Turn stashing on/off as appropriate */
- temp = gfar_read(&regs->attr);
-
- if (length)
- temp |= ATTR_BUFSTASH;
- else
- temp &= ~(ATTR_BUFSTASH);
-
- gfar_write(&regs->attr, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
- gfar_set_rx_stash_size);
-
-/* Stashing will only be enabled when rx_stash_size != 0 */
-static ssize_t gfar_show_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_index);
-}
-
-static ssize_t gfar_set_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned short index = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (index > priv->rx_stash_size)
- goto out;
-
- if (index == priv->rx_stash_index)
- goto out;
-
- priv->rx_stash_index = index;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EI_MASK;
- temp |= ATTRELI_EI(index);
- gfar_write(&regs->attreli, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
- gfar_set_rx_stash_index);
-
-static ssize_t gfar_show_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_threshold);
-}
-
-static ssize_t gfar_set_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (length > GFAR_MAX_FIFO_THRESHOLD)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_threshold = length;
-
- temp = gfar_read(&regs->fifo_tx_thr);
- temp &= ~FIFO_TX_THR_MASK;
- temp |= length;
- gfar_write(&regs->fifo_tx_thr, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
- gfar_set_fifo_threshold);
-
-static ssize_t gfar_show_fifo_starve(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve);
-}
-
-static ssize_t gfar_set_fifo_starve(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve = num;
-
- temp = gfar_read(&regs->fifo_tx_starve);
- temp &= ~FIFO_TX_STARVE_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
- gfar_set_fifo_starve);
-
-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve_off);
-}
-
-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE_OFF)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve_off = num;
-
- temp = gfar_read(&regs->fifo_tx_starve_shutoff);
- temp &= ~FIFO_TX_STARVE_OFF_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve_shutoff, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
- gfar_set_fifo_starve_off);
-
-void gfar_init_sysfs(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
- int rc;
-
- /* Initialize the default values */
- priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
- priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
- priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
-
- /* Create our sysfs files */
- rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
- if (rc)
- dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
-}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d91933c4cdd..e6f8961d49eb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2038,13 +2038,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
msix_entry),
GFP_KERNEL);
if (adapter->msix_entries) {
+ struct e1000_adapter *a = adapter;
+
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- adapter->num_vectors);
- if (err == 0)
+ err = pci_enable_msix_range(a->pdev,
+ a->msix_entries,
+ a->num_vectors,
+ a->num_vectors);
+ if (err > 0)
return;
}
/* MSI-X failed, so fall through and try MSI */
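
The e1000e hunk above switches from pci_enable_msix() to pci_enable_msix_range(), which returns the number of vectors actually allocated (at least minvec) on success or a negative errno, and never a positive retry hint; that is why the success test becomes err > 0. A hedged sketch of the same pattern follows; it assumes kernel context and is not e1000e's exact code.

#include <linux/pci.h>

/* Request exactly 'want' vectors; anything less is treated as failure.
 * 'entries' must have room for at least 'want' struct msix_entry slots.
 */
static int example_enable_exact_msix(struct pci_dev *pdev,
				     struct msix_entry *entries, int want)
{
	int got, i;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/* min == max, so a positive return can only be 'want' */
	got = pci_enable_msix_range(pdev, entries, want, want);
	if (got < 0)
		return got;	/* negative errno, no vectors were enabled */
	return 0;
}
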
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index a50e6b3479ae..ed3902bf249b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
- memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
+ memset(desc, 0, sizeof(*desc));
+ memset(details, 0, sizeof(*details));
ntc++;
if (ntc == asq->count)
ntc = 0;
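
The admin-queue cleanup above also drops the explicit casts and struct names from memset() in favour of sizeof(*ptr), which keeps the cleared size tied to the pointee type even if that type later changes. A tiny stand-alone illustration (the struct is invented):

#include <string.h>
#include <stdio.h>

struct demo_desc {
    unsigned int flags;
    unsigned int opcode;
    char cookie[8];
};

int main(void)
{
    struct demo_desc d = { .flags = 0xff, .opcode = 7, .cookie = "stale" };
    struct demo_desc *p = &d;

    /* sizeof(*p) tracks the pointee's type automatically; no cast needed */
    memset(p, 0, sizeof(*p));

    printf("flags=%u opcode=%u cookie='%s'\n", d.flags, d.opcode, d.cookie);
    return 0;
}
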
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b901371ca361..53f3ed2df796 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,7 +38,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 30
+#define DRV_VERSION_BUILD 32
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -305,6 +305,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ set_bit(__I40E_DOWN, &vsi->state);
i40e_down(vsi);
break;
}
@@ -3107,13 +3108,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
- ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
-
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
/* Skip if the queue is already in the requested state */
if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
@@ -3123,8 +3124,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable) {
wr32(hw, I40E_QTX_HEAD(pf_q), 0);
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
- I40E_QTX_ENA_QENA_STAT_MASK;
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
} else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
}
@@ -3171,12 +3171,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
- ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
if (enable) {
/* is STAT set ? */
@@ -3190,11 +3191,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK;
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
else
- rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK);
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
/* wait for the change to finish */
@@ -5331,6 +5330,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
+ if (pf->num_alloc_vfs) {
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_reset_vf(&pf->vf[v], true);
+ }
+
/* tell the firmware that we're starting */
dv.major_version = DRV_VERSION_MAJOR;
dv.minor_version = DRV_VERSION_MINOR;
@@ -5850,37 +5854,16 @@ err_out:
**/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
- int err = 0;
-
- pf->num_msix_entries = 0;
- while (vectors >= I40E_MIN_MSIX) {
- err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
- if (err == 0) {
- /* good to go */
- pf->num_msix_entries = vectors;
- break;
- } else if (err < 0) {
- /* total failure */
- dev_info(&pf->pdev->dev,
- "MSI-X vector reservation failed: %d\n", err);
- vectors = 0;
- break;
- } else {
- /* err > 0 is the hint for retry */
- dev_info(&pf->pdev->dev,
- "MSI-X vectors wanted %d, retrying with %d\n",
- vectors, err);
- vectors = err;
- }
- }
-
- if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ I40E_MIN_MSIX, vectors);
+ if (vectors < 0) {
dev_info(&pf->pdev->dev,
- "Couldn't get enough vectors, only %d available\n",
- vectors);
+ "MSI-X vector reservation failed: %d\n", vectors);
vectors = 0;
}
+ pf->num_msix_entries = vectors;
+
return vectors;
}
@@ -5942,7 +5925,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+ dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
@@ -6071,7 +6054,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
- dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
+ dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6080,7 +6063,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
}
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
- dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+ dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6107,7 +6090,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_intr, 0, pf->misc_int_name, pf);
if (err) {
dev_info(&pf->pdev->dev,
- "request_irq for msix_misc failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ pf->misc_int_name, err);
return -EFAULT;
}
}
@@ -8070,6 +8054,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
i40e_flush(hw);
+
+ if (pci_num_vf(pdev)) {
+ dev_info(&pdev->dev,
+ "Active VFs found, allocating resources.\n");
+ err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+ if (err)
+ dev_info(&pdev->dev,
+ "Error %d allocating resources for existing VFs\n",
+ err);
+ }
}
pfs_found++;
@@ -8165,16 +8159,16 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
-
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
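
The i40e queue-control hunks above replace the open-coded do/while, which XOR-ed the shifted QENA_REQ and QENA_STAT bits, with a bounded for loop that exits as soon as the two bits agree, and software now only drives the REQ bit. Here is a runnable stand-alone model of that bounded poll; the shift values and the fake register standing in for rd32() are illustration only.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define QENA_REQ_SHIFT  0
#define QENA_STAT_SHIFT 2

static uint32_t fake_reg;
static int reads;

/* Stand-in for rd32(): the "hardware" copies REQ into STAT after a few reads */
static uint32_t read_reg(void)
{
    if (++reads >= 3)
        fake_reg = (fake_reg & ~(1u << QENA_STAT_SHIFT)) |
                   (((fake_reg >> QENA_REQ_SHIFT) & 1u) << QENA_STAT_SHIFT);
    return fake_reg;
}

/* Bounded poll: give up after 'tries' iterations instead of looping forever */
static bool wait_req_stat_match(int tries)
{
    uint32_t reg;
    int j;

    for (j = 0; j < tries; j++) {
        reg = read_reg();
        if (((reg >> QENA_REQ_SHIFT) & 1u) == ((reg >> QENA_STAT_SHIFT) & 1u))
            return true;
        /* a real driver would usleep_range() between reads here */
    }
    return false;
}

int main(void)
{
    fake_reg = 1u << QENA_REQ_SHIFT;    /* software requested enable */
    printf("matched: %s after %d reads\n",
           wait_req_stat_match(50) ? "yes" : "no", reads);
    return 0;
}
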
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4bb482b1a7f..19af4ce0a4fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -892,7 +892,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b9d1c1c8ca5a..189e250198dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -408,18 +408,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not allocate VF broadcast filter\n");
}
- if (!f) {
- dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
- ret = -ENOMEM;
- goto error_alloc_vsi_res;
- }
-
/* program mac filter */
ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
+ if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
- goto error_alloc_vsi_res;
- }
error_alloc_vsi_res:
return ret;
@@ -679,9 +671,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
complete_reset:
/* reallocate vf resources to reset the VSI state */
i40e_free_vf_res(vf);
- mdelay(10);
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -847,7 +839,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
*
* allocate vf resources
**/
-static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
struct i40e_vf *vfs;
int i, ret = 0;
@@ -855,14 +847,16 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* Disable interrupt 0 so we don't try to handle the VFLR. */
i40e_irq_dynamic_disable_icr0(pf);
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "pci_enable_sriov failed with error %d!\n", ret);
- pf->num_alloc_vfs = 0;
- goto err_iov;
+ /* Check to see if we're just allocating resources for extant VFs */
+ if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to enable SR-IOV, error %d.\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
}
-
/* allocate memory */
vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
@@ -1873,7 +1867,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- i40e_reset_vf(vf, true);
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ i40e_reset_vf(vf, true);
}
}
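
With the i40e_alloc_vfs() change above, SR-IOV is only (re-)enabled when the requested VF count differs from what the PCI core already reports, so the new probe-time call in i40e_main.c can adopt VFs that are already present. A hedged sketch of that guard, assuming kernel context and not the driver's exact code:

#include <linux/pci.h>

/* Enable SR-IOV only if the requested VF count is not already in effect. */
static int example_enable_vfs(struct pci_dev *pdev, int want)
{
	int err = 0;

	if (pci_num_vf(pdev) != want)
		err = pci_enable_sriov(pdev, want);

	return err;
}
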
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index cc1feee36e12..bedf0ba21d74 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -102,6 +102,7 @@ struct i40e_vf {
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1bca38d..97662b6bd98a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
- __le32 tenant_id ;
+ __le32 tenant_id;
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ffdb01d853db..827bb5fa4af9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -722,7 +722,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
@@ -807,8 +807,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3bffac06592f..092aace2a76c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define ETH_ALEN 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ff6529b288a1..ccb43d343543 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/udp.h>
-#include <linux/sctp.h>
-
#include "i40e_type.h"
#include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
/* Driver state. The order of these is important! */
enum i40evf_state_t {
__I40EVF_STARTUP, /* driver loaded, probe complete */
- __I40EVF_FAILED, /* PF communication failed. Fatal. */
__I40EVF_REMOVE, /* driver is being unloaded */
__I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__I40EVF_INIT_SW, /* got resources, setting up structs */
+ __I40EVF_RESETTING, /* in reset */
/* Below here, watchdog is running */
__I40EVF_DOWN, /* ready, can be opened */
__I40EVF_TESTING, /* in ethtool self-test */
- __I40EVF_RESETTING, /* in reset */
__I40EVF_RUNNING, /* opened, working */
};
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
- struct vlan_group *vlgrp;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
struct list_head vlan_filter_list;
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
-
- /* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ char misc_vector_name[IFNAMSIZ + 9];
/* TX */
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
- u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
struct list_head mac_filter_list;
-#ifdef DEBUG
- bool detect_tx_hung;
-#endif /* DEBUG */
/* RX */
struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
- int txd_count;
- int rxd_count;
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
- u64 non_eop_descs;
int num_msix_vectors;
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
-
- u32 init_state;
- volatile unsigned long flags;
+ u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
@@ -234,6 +209,8 @@ struct i40evf_adapter {
#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
+#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -251,21 +228,19 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
- /* structs defined in i40e_vf.h */
- struct i40e_hw hw;
+ struct i40e_hw hw; /* defined in i40e_type.h */
enum i40evf_state_t state;
volatile unsigned long crit_section;
- u64 tx_busy;
struct work_struct watchdog_task;
bool netdev_registered;
- bool dev_closed;
bool link_up;
enum i40e_virtchnl_ops current_op;
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +251,6 @@ struct i40evf_adapter {
u32 aq_wait_count;
};
-struct i40evf_info {
- enum i40e_mac_type mac;
- unsigned int flags;
-};
-
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -315,6 +285,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4bf5ac0..8b0db1ce179c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
+ int i;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == adapter->txd_count) &&
- (new_rx_count == adapter->rxd_count))
+ if ((new_tx_count == adapter->tx_rings[0]->count) &&
+ (new_rx_count == adapter->rx_rings[0]->count))
return 0;
- adapter->txd_count = new_tx_count;
- adapter->rxd_count = new_rx_count;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[0]->count = new_tx_count;
+ adapter->rx_rings[0]->count = new_rx_count;
+ }
if (netif_running(netdev))
i40evf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f5caf4419243..b2c03bca7929 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.11"
+#define DRV_VERSION "0.9.13"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2014 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
*
@@ -167,9 +167,13 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
-
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+ i40evf_request_reset(adapter);
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ schedule_work(&adapter->reset_task);
+ }
}
/**
@@ -211,6 +215,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
int i;
struct i40e_hw *hw = &adapter->hw;
+ if (!adapter->msix_entries)
+ return;
+
for (i = 1; i < adapter->num_msix_vectors; i++) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +518,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;
- sprintf(adapter->name[0], "i40evf:mbx");
+ sprintf(adapter->misc_vector_name, "i40evf:mbx");
err = request_irq(adapter->msix_entries[0].vector,
- &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ &i40evf_msix_aq, 0,
+ adapter->misc_vector_name, netdev);
if (err) {
dev_err(&adapter->pdev->dev,
- "request_irq for msix_aq failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ adapter->misc_vector_name, err);
free_irq(adapter->msix_entries[0].vector, netdev);
}
return err;
@@ -963,16 +972,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
- /* remove all MAC filters from the VSI */
+ /* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
}
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- /* disable receives */
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- msleep(20);
-
+ /* remove all VLAN filters */
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ f->remove = true;
+ }
+ if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
+ adapter->state != __I40EVF_RESETTING) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+ }
netif_tx_disable(netdev);
netif_tx_stop_all_queues(netdev);
@@ -1291,19 +1307,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
- if (adapter->state < __I40EVF_DOWN)
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto restart_watchdog;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_info(&adapter->pdev->dev, "Checking for redemption\n");
+ if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
+ /* A chance for redemption! */
+ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
+ adapter->state = __I40EVF_STARTUP;
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ schedule_delayed_work(&adapter->init_task, 10);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ /* Don't reschedule the watchdog, since we've restarted
+ * the init task. When init_task contacts the PF and
+ * gets everything set up again, it'll restart the
+ * watchdog for us. Down, boy. Sit. Stay. Woof.
+ */
+ return;
+ }
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
+ }
- if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ if ((adapter->state < __I40EVF_DOWN) ||
+ (adapter->flags & I40EVF_FLAG_RESET_PENDING))
goto watchdog_done;
- /* check for unannounced reset */
- if ((adapter->state != __I40EVF_RESETTING) &&
+ /* check for reset */
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
adapter->state = __I40EVF_RESETTING;
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
- __func__);
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1358,13 +1402,15 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_irq_enable(adapter, true);
i40evf_fire_sw_int(adapter, 0xFF);
+
watchdog_done:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
if (adapter->aq_required)
mod_timer(&adapter->watchdog_timer,
jiffies + msecs_to_jiffies(20));
else
mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
schedule_work(&adapter->adminq_task);
}
@@ -1411,6 +1457,8 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
i40e_flush(hw);
}
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
/**
* i40evf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
@@ -1421,8 +1469,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
**/
static void i40evf_reset_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter =
- container_of(work, struct i40evf_adapter, reset_task);
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ reset_task);
struct i40e_hw *hw = &adapter->hw;
int i = 0, err;
uint32_t rstat_val;
@@ -1430,22 +1479,56 @@ static void i40evf_reset_task(struct work_struct *work)
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section))
udelay(500);
+ /* poll until we see the reset actually happen */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val != I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+ break;
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
+ }
+ if (i == I40EVF_RESET_WAIT_COUNT) {
+ dev_err(&adapter->pdev->dev, "Reset was not detected\n");
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ goto continue_reset; /* act like the reset happened */
+ }
- /* wait until the reset is complete */
- for (i = 0; i < 20; i++) {
+ /* wait until the reset is complete and the PF is responding to us */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_COMPLETED)
+ if (rstat_val == I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
break;
- else
- mdelay(100);
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
}
- if (i == 20) {
+ if (i == I40EVF_RESET_WAIT_COUNT) {
/* reset never finished */
- dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
- __func__, rstat_val);
- /* carry on anyway */
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+ rstat_val);
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+ if (netif_running(adapter->netdev))
+ i40evf_close(adapter->netdev);
+
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+ i40evf_shutdown_adminq(hw);
+ adapter->netdev->flags &= ~IFF_UP;
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return; /* Do not attempt to reinit. It's dead, Jim. */
}
+
+continue_reset:
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
i40evf_down(adapter);
adapter->state = __I40EVF_RESETTING;
@@ -1505,6 +1588,9 @@ static void i40evf_adminq_task(struct work_struct *work)
i40e_status ret;
u16 pending;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return;
+
event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf) {
@@ -1636,6 +1722,10 @@ static int i40evf_open(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
int err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+ return -EIO;
+ }
if (adapter->state != __I40EVF_DOWN)
return -EBUSY;
@@ -1690,8 +1780,12 @@ static int i40evf_close(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ if (adapter->state <= __I40EVF_DOWN)
+ return 0;
+
/* signal that we are down to the interrupt handler */
adapter->state = __I40EVF_DOWN;
+
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
@@ -1842,16 +1936,18 @@ static void i40evf_init_task(struct work_struct *work)
switch (adapter->state) {
case __I40EVF_STARTUP:
/* driver loaded, probe complete */
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
err = i40e_set_mac_type(hw);
if (err) {
- dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+ err);
goto err;
}
err = i40evf_check_reset_complete(hw);
if (err) {
- dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
- __func__, err);
+ dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+ err);
goto err;
}
hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1957,14 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_init_adminq(hw);
if (err) {
- dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ err);
goto err;
}
err = i40evf_send_api_ver(adapter);
if (err) {
- dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n",
+ err);
i40evf_shutdown_adminq(hw);
goto err;
}
@@ -1882,13 +1978,13 @@ static void i40evf_init_task(struct work_struct *work)
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
err);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable send config request, error %d\n",
+ dev_err(&pdev->dev, "Unable send config request (%d)\n",
err);
goto err;
}
@@ -1902,18 +1998,15 @@ static void i40evf_init_task(struct work_struct *work)
(I40E_MAX_VF_VSI *
sizeof(struct i40e_virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
- if (!adapter->vf_res) {
- dev_err(&pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!adapter->vf_res)
goto err;
- }
}
err = i40evf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
goto restart;
if (err) {
- dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+ err);
goto err_alloc;
}
adapter->state = __I40EVF_INIT_SW;
@@ -1927,20 +2020,17 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi_res = &adapter->vf_res->vsi_res[i];
}
if (!adapter->vsi_res) {
- dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ dev_err(&pdev->dev, "No LAN VSI found\n");
goto err_alloc;
}
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->txd_count = I40EVF_DEFAULT_TXD;
- adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
-
- netdev->features |= NETIF_F_SG |
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_SCTP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -1956,11 +2046,9 @@ static void i40evf_init_task(struct work_struct *work)
NETIF_F_HW_VLAN_CTAG_FILTER;
}
- /* The HW MAC address was set and/or determined in sw_init */
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev,
- "Invalid MAC address %pMAC, using random\n",
- adapter->hw.mac.addr);
+ dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+ adapter->hw.mac.addr);
random_ether_addr(adapter->hw.mac.addr);
}
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -1994,8 +2082,6 @@ static void i40evf_init_task(struct work_struct *work)
netif_carrier_off(netdev);
- strcpy(netdev->name, "eth%d");
-
adapter->vsi.id = adapter->vsi_res->vsi_id;
adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
adapter->vsi.back = adapter;
@@ -2005,9 +2091,11 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
adapter->vsi.netdev = adapter->netdev;
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ if (!adapter->netdev_registered) {
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+ }
adapter->netdev_registered = true;
@@ -2031,17 +2119,16 @@ err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
i40evf_reset_interrupt_capability(adapter);
- adapter->state = __I40EVF_FAILED;
err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
err:
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw); /* ignore error */
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
- if (hw->aq.asq.count)
- i40evf_shutdown_adminq(hw); /* ignore error */
- adapter->state = __I40EVF_FAILED;
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */
}
schedule_delayed_work(&adapter->init_task, HZ * 3);
@@ -2084,20 +2171,18 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct i40evf_adapter *adapter = NULL;
struct i40e_hw *hw = NULL;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = true;
/* coherent mask for the same size will always succeed if
* dma_set_mask does
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- pci_using_dac = false;
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
} else {
dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
@@ -2128,8 +2213,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -2271,6 +2354,7 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40e_hw *hw = &adapter->hw;
cancel_delayed_work_sync(&adapter->init_task);
+ cancel_work_sync(&adapter->reset_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -2278,17 +2362,15 @@ static void i40evf_remove(struct pci_dev *pdev)
}
adapter->state = __I40EVF_REMOVE;
- if (adapter->num_msix_vectors) {
+ if (adapter->msix_entries) {
i40evf_misc_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
-
- flush_scheduled_work();
-
i40evf_free_misc_irq(adapter);
-
i40evf_reset_interrupt_capability(adapter);
}
+ del_timer_sync(&adapter->watchdog_timer);
+ flush_scheduled_work();
+
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e6978d79e62b..e294f012647d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
struct i40e_hw *hw = &adapter->hw;
i40e_status err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return 0; /* nothing to see here, move along */
+
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+ /* Don't check CURRENT_OP - this is always higher priority */
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
/**
* i40evf_virtchnl_completion
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- adapter->state = __I40EVF_RESETTING;
- schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev,
- "%s: hardware reset pending\n", __func__);
+ dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ schedule_work(&adapter->reset_task);
+ }
break;
default:
dev_err(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 46d31a49f5ea..84dfa3f0e3b8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1111,10 +1111,11 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- numvecs);
- if (err == 0)
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
return;
igb_reset_interrupt_capability(adapter);
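The igb hunk above, like most of the MSI-X hunks that follow, replaces pci_enable_msix() with pci_enable_msix_range(), which takes an explicit minimum and maximum vector count. A minimal sketch of the new contract is given below as a reading aid; foo_adapter, foo_enable_msix and min_vecs are hypothetical names used only for illustration, not identifiers from this patch.

/* Illustrative only -- not part of this patch. */
#include <linux/pci.h>

struct foo_adapter {
	struct pci_dev *pdev;
	struct msix_entry *msix_entries;	/* .entry fields already filled in */
	int num_msix;				/* vectors we would like to have */
};

static int foo_enable_msix(struct foo_adapter *adapter, int min_vecs)
{
	int got;

	/* Request up to num_msix vectors but accept as few as min_vecs.
	 * Unlike pci_enable_msix(), this never returns a positive
	 * "retry with this many" hint: it either allocates between
	 * min_vecs and num_msix vectors and returns that count, or it
	 * fails with a negative errno.
	 */
	got = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    min_vecs, adapter->num_msix);
	if (got < 0)
		return got;	/* caller falls back to MSI or legacy IRQs */

	adapter->num_msix = got;	/* may be fewer than requested */
	return 0;
}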
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 675435fc2e53..e2c6d8059b74 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
for (i = 0; i < 3; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries, 3);
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries, 3, 3);
}
- if (err) {
+ if (err < 0) {
/* MSI-X failed */
dev_err(&adapter->pdev->dev,
"Failed to initialize MSI-X interrupts.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 32e3eaaa160a..0834e1ea44bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -698,7 +698,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
- int err, vector_threshold;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -712,18 +712,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (vectors < vector_threshold) {
+ if (vectors < 0) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d4ada72dfd0..18076c4178b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
}
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
#ifdef IXGBE_FCOE
@@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
default:
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq + f->offset;
#else
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
#endif
}
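The select-queue hunk above, and the matching lantiq and mlx4 hunks further down, adapt .ndo_select_queue to its new fourth argument: instead of calling __netdev_pick_tx() directly, a driver defers to the core through the supplied fallback. A minimal sketch of the new callback shape follows; foo_select_queue and foo_netdev_ops are hypothetical names, not identifiers from this patch.

/* Illustrative only -- not part of this patch. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	/* A driver with no queue-mapping policy of its own simply lets
	 * the core pick the queue via the fallback it was handed.
	 */
	return fallback(dev, skb);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_select_queue	= foo_select_queue,
};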
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9df28985eba7..a6af7b7c59b1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1817,7 +1817,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
int vectors)
{
- int err = 0;
int vector_threshold;
/* We'll want at least 2 (vector_threshold):
@@ -1831,33 +1830,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err || err < 0) /* Success or a nasty failure. */
- break;
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (vectors < vector_threshold)
- err = -ENOMEM;
-
- if (err) {
+ if (vectors < 0) {
dev_err(&adapter->pdev->dev,
"Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- } else {
- /*
- * Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return vectors;
}
- return err;
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 8f9266c64c75..fd4b6aecf6ee 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev)
static u16
ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/* we are currently only using the first queue */
return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 6300fd27f2db..68e6a6613e9a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,12 +43,12 @@ config MVMDIO
This driver is used by the MV643XX_ETH and MVNETA drivers.
config MVNETA
- tristate "Marvell Armada 370/XP network interface support"
- depends on MACH_ARMADA_370_XP
+ tristate "Marvell Armada 370/38x/XP network interface support"
+ depends on PLAT_ORION
select MVMDIO
---help---
This driver supports the network interface units in the
- Marvell ARMADA XP and ARMADA 370 SoC family.
+ Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
Note that this driver is distinct from the mv643xx_eth
driver, which should be used for the older Marvell SoCs
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f418f4f20f94..12c6a66e54d1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2784,7 +2784,6 @@ static int mvneta_probe(struct platform_device *pdev)
const char *mac_from;
int phy_mode;
int err;
- int cpu;
/* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
@@ -2845,18 +2844,12 @@ static int mvneta_probe(struct platform_device *pdev)
}
/* Alloc per-cpu stats */
- pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+ pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
goto err_unmap;
}
- for_each_possible_cpu(cpu) {
- struct mvneta_pcpu_stats *stats;
- stats = per_cpu_ptr(pp->stats, cpu);
- u64_stats_init(&stats->syncp);
- }
-
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr) {
mac_from = "device tree";
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8e8a7eb43a2c..13457032d15f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
if (vlan_tx_tag_present(skb))
up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
- return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
+ return fallback(dev, skb) % rings_p_up + up * rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d711158b0d4b..218b759c506e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1976,7 +1976,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int nreq = min_t(int, dev->caps.num_ports *
min_t(int, netif_get_num_default_rss_queues() + 1,
MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
- int err;
int i;
if (msi_x) {
@@ -1990,23 +1989,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
for (i = 0; i < nreq; ++i)
entries[i].entry = i;
- retry:
- err = pci_enable_msix(dev->pdev, entries, nreq);
- if (err) {
- /* Try again if at least 2 vectors are available */
- if (err > 1) {
- mlx4_info(dev, "Requested %d vectors, "
- "but only %d MSI-X vectors available, "
- "trying again\n", nreq, err);
- nreq = err;
- goto retry;
- }
+ nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+
+ if (nreq < 0) {
kfree(entries);
goto no_msi;
- }
-
- if (nreq <
- MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ } else if (nreq < MSIX_LEGACY_SZ +
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/*Working in legacy mode , all EQ's shared*/
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3af04c3f42ea..9ca223bc90fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8df2c3e..8ff57e8e3e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
config MLX5_CORE
tristate
- depends on PCI && X86
+ depends on PCI
default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a064f06e0cb8..81df046a6d69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &dev->priv.eq_table;
int num_eqs = 1 << dev->caps.log_max_eq;
int nvec;
- int err;
int i;
nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
for (i = 0; i < nvec; i++)
table->msix_arr[i].entry = i;
-retry:
- table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
- err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
- if (err <= 0) {
- return err;
- } else if (err > 2) {
- nvec = err;
- goto retry;
- }
+ nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ MLX5_EQ_VEC_COMP_BASE, nvec);
+ if (nvec < 0)
+ return nvec;
- mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
+ table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 68026f7e8ba3..130f6b204efa 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
status = 0;
if (myri10ge_msi) {
if (mgp->num_slices > 1) {
- status =
- pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- mgp->msix_enabled = 1;
- } else {
+ status = pci_enable_msix_range(pdev, mgp->msix_vectors,
+ mgp->num_slices, mgp->num_slices);
+ if (status < 0) {
dev_err(&pdev->dev,
"Error %d setting up MSI-X\n", status);
return status;
}
+ mgp->msix_enabled = 1;
}
if (mgp->msix_enabled == 0) {
status = pci_enable_msi(pdev);
@@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
GFP_KERNEL);
if (mgp->msix_vectors == NULL)
- goto disable_msix;
+ goto no_msix;
for (i = 0; i < mgp->num_slices; i++) {
mgp->msix_vectors[i].entry = i;
}
while (mgp->num_slices > 1) {
- /* make sure it is a power of two */
- while (!is_power_of_2(mgp->num_slices))
- mgp->num_slices--;
+ mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
if (mgp->num_slices == 1)
- goto disable_msix;
- status = pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- pci_disable_msix(pdev);
+ goto no_msix;
+ status = pci_enable_msix_range(pdev,
+ mgp->msix_vectors,
+ mgp->num_slices,
+ mgp->num_slices);
+ if (status < 0)
+ goto no_msix;
+
+ pci_disable_msix(pdev);
+
+ if (status == mgp->num_slices) {
if (old_allocated)
kfree(old_fw);
return;
- }
- if (status > 0)
+ } else {
mgp->num_slices = status;
- else
- goto disable_msix;
+ }
}
-disable_msix:
+no_msix:
if (mgp->msix_vectors != NULL) {
kfree(mgp->msix_vectors);
mgp->msix_vectors = NULL;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9eeddbd0b2c7..56e3a9d42bb2 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3792,9 +3792,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
writeq(rx_mat, &bar0->rx_mat);
readq(&bar0->rx_mat);
- ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
+ ret = pci_enable_msix_range(nic->pdev, nic->entries,
+ nic->num_entries, nic->num_entries);
/* We fail init if error or we get less vectors than min required */
- if (ret) {
+ if (ret < 0) {
DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
kfree(nic->entries);
swstats->mem_freed += nic->num_entries *
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e46e8698e630..c83cedd26dec 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2349,12 +2349,18 @@ start:
vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret > 0) {
+ ret = pci_enable_msix_range(vdev->pdev,
+ vdev->entries, 3, vdev->intr_cnt);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto enable_msix_failed;
+ } else if (ret < vdev->intr_cnt) {
+ pci_disable_msix(vdev->pdev);
+
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
+ if (max_config_vpath != VXGE_USE_DEFAULT) {
ret = -ENODEV;
goto enable_msix_failed;
}
@@ -2368,9 +2374,6 @@ start:
vxge_close_vpaths(vdev, temp);
vdev->no_of_vpath = temp;
goto start;
- } else if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
}
return 0;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 70cf97fe67f2..bad3c057ee8a 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- int ret = 1;
+ int ret;
int i;
irqreturn_t (*handler)(int foo, void *data);
@@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
np->msi_x_entry[i].entry = i;
- ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
- if (ret == 0) {
+ ret = pci_enable_msix_range(np->pci_dev,
+ np->msi_x_entry,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK);
+ if (ret > 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
- nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
+ nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for rx %d\n",
ret);
@@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for tx handling */
sprintf(np->name_tx, "%s-tx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
- nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
+ nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for tx %d\n",
ret);
@@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for link and timer handling */
sprintf(np->name_other, "%s-other", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
- nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
+ nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for link %d\n",
ret);
@@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
} else {
/* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
+ handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed %d\n",
ret);
@@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap1);
}
netdev_info(dev, "MSI-X enabled\n");
+ return 0;
}
}
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if (np->msi_flags & NV_MSI_CAPABLE) {
ret = pci_enable_msi(np->pci_dev);
if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev, "request_irq failed %d\n",
ret);
pci_disable_msi(np->pci_dev);
@@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
netdev_info(dev, "MSI enabled\n");
+ return 0;
}
}
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
- goto out_err;
- }
+ if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
+ goto out_err;
return 0;
out_free_tx:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 70849dea32b1..f09c35d669b3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
if (adapter->msix_supported) {
netxen_init_msix_entries(adapter, num_msix);
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev, adapter->msix_entries,
+ num_msix, num_msix);
+ if (err > 0) {
adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
netxen_set_msix_bit(pdev, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ba78c7481fa3..e07fd948d98b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -684,7 +684,7 @@ restore:
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1, vector;
+ int err, vector;
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
@@ -701,13 +701,17 @@ enable_msix:
for (vector = 0; vector < num_msix; vector++)
adapter->msix_entries[vector].entry = vector;
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries, 1, num_msix);
+
+ if (err == num_msix) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
- return err;
+ return 0;
} else if (err > 0) {
+ pci_disable_msix(pdev);
+
dev_info(&pdev->dev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
num_msix, err);
@@ -715,12 +719,12 @@ enable_msix:
if (qlcnic_82xx_check(adapter)) {
num_msix = rounddown_pow_of_two(err);
if (err < QLCNIC_82XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
} else {
num_msix = rounddown_pow_of_two(err - 1);
num_msix += 1;
if (err < QLCNIC_83XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
}
if (qlcnic_82xx_check(adapter) &&
@@ -747,7 +751,7 @@ enable_msix:
}
}
- return err;
+ return -EIO;
}
static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ce2cfddbed50..adf87d26e68f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3331,24 +3331,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
for (i = 0; i < qdev->intr_count; i++)
qdev->msi_x_entry[i].entry = i;
- /* Loop to get our vectors. We start with
- * what we want and settle for what we get.
- */
- do {
- err = pci_enable_msix(qdev->pdev,
- qdev->msi_x_entry, qdev->intr_count);
- if (err > 0)
- qdev->intr_count = err;
- } while (err > 0);
-
+ err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+ 1, qdev->intr_count);
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
netif_warn(qdev, ifup, qdev->ndev,
"MSI-X Enable failed, trying MSI.\n");
- qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
- } else if (err == 0) {
+ } else {
+ qdev->intr_count = err;
set_bit(QL_MSIX_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"MSI-X Enabled, got %d vectors.\n",
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 040cb94e8219..b1e6554f44bc 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,8 @@
/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc.
+ * Copyright (C) 2008-2014 Renesas Solutions Corp.
+ * Copyright (C) 2013-2014 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,10 @@
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
@@ -2098,8 +2102,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETHERSMALL)
- txdesc->buffer_length = ETHERSMALL;
+ if (skb->len < ETH_ZLEN)
+ txdesc->buffer_length = ETH_ZLEN;
else
txdesc->buffer_length = skb->len;
@@ -2710,6 +2714,54 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_change_mtu = eth_change_mtu,
};
+#ifdef CONFIG_OF
+static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct sh_eth_plat_data *pdata;
+ struct device_node *phy;
+ const char *mac_addr;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->phy_interface = of_get_phy_mode(np);
+
+ phy = of_parse_phandle(np, "phy-handle", 0);
+ if (of_property_read_u32(phy, "reg", &pdata->phy))
+ return NULL;
+ pdata->phy_irq = irq_of_parse_and_map(phy, 0);
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+
+ pdata->no_ether_link =
+ of_property_read_bool(np, "renesas,no-ether-link");
+ pdata->ether_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ return pdata;
+}
+
+static const struct of_device_id sh_eth_match_table[] = {
+ { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+ { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_eth_match_table);
+#else
+static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int sh_eth_drv_probe(struct platform_device *pdev)
{
int ret, devno = 0;
@@ -2763,6 +2815,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ if (pdev->dev.of_node)
+ pd = sh_eth_parse_dt(&pdev->dev);
if (!pd) {
dev_err(&pdev->dev, "no platform data\n");
ret = -EINVAL;
@@ -2778,7 +2832,15 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->ether_link_active_low = pd->ether_link_active_low;
/* set cpu data */
- mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ if (id) {
+ mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ } else {
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(sh_eth_match_table),
+ &pdev->dev);
+ mdp->cd = (struct sh_eth_cpu_data *)match->data;
+ }
mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
sh_eth_set_default_cpu_data(mdp->cd);
@@ -2920,6 +2982,7 @@ static struct platform_driver sh_eth_driver = {
.driver = {
.name = CARDNAME,
.pm = SH_ETH_PM_OPS,
+ .of_match_table = of_match_ptr(sh_eth_match_table),
},
};
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 6075915b88ec..d55e37cd5fec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,8 +27,7 @@
#define RX_RING_MIN 64
#define TX_RING_MAX 1024
#define RX_RING_MAX 1024
-#define ETHERSMALL 60
-#define PKT_BUF_SZ 1538
+#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
#define SH_ETH_TSU_CAM_ENTRIES 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 174a92f5fe51..3b397987119d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
- /* We can have one VI for each 8K region. However we need
- * multiple TX queues per channel.
+ /* We can have one VI for each 8K region. However, until we
+ * use TX option descriptors we need two TX queues per channel.
*/
efx->max_channels =
min_t(unsigned int,
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 207ac9a1e3de..62a55dde61d5 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -227,36 +227,6 @@
#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
-/* RX_USER_DESC */
-#define ESF_DZ_RX_USR_RESERVED_LBN 62
-#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
-#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
-
/* TX_CSUM_TSTAMP_DESC */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
@@ -338,37 +308,6 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_USER_DESC */
-#define ESF_DZ_TX_USR_TYPE_LBN 63
-#define ESF_DZ_TX_USR_TYPE_WIDTH 1
-#define ESF_DZ_TX_USR_CONT_LBN 62
-#define ESF_DZ_TX_USR_CONT_WIDTH 1
-#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
/*************************************************************************/
/* TX_DESC_UPD_REG: Transmit descriptor update register.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 83d464347021..d72e0038a740 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
- channel->n_rx_frm_trunc = 0;
-
return 0;
fail:
@@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
- if (rc > 0) {
+ rc = pci_enable_msix_range(efx->pci_dev,
+ xentries, 1, n_channels);
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ } else if (rc < n_channels) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
" available (%d < %u).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
- rc = pci_enable_msix(efx->pci_dev, xentries,
- n_channels);
}
- if (rc == 0) {
+ if (rc > 0) {
efx->n_channels = n_channels;
if (n_channels > extra_channels)
n_channels -= extra_channels;
@@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
- } else {
- /* Fall back to single channel MSI */
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
}
}
@@ -2115,7 +2111,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
- char *new_addr = addr->sa_data;
+ u8 *new_addr = addr->sa_data;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -3273,6 +3269,6 @@ module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
-MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index dbd7b78fe01c..99032581336f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,7 +14,7 @@
#include "net_driver.h"
#include "filter.h"
-/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
/* TX */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 229428915aa8..89fcaffd7de2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
* @test_index: Starting index of the test
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
*/
static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
* @tests: Efx self-test results structure, or %NULL
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
- int already_up;
+ bool already_up;
int rc = -ENOMEM;
efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto fail;
if (efx->state != STATE_READY) {
- rc = -EIO;
- goto fail1;
+ rc = -EBUSY;
+ goto out;
}
netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail1;
+ goto out;
}
}
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-fail1:
- /* Fill ethtool results structures */
+out:
efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
kfree(efx_tests);
fail:
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}
-
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 18d6f761f4d0..72652f380243 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
-
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
efx_schedule_channel_irq(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
+
/**************************************************************************
*
* RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
case 100: link_speed = 1; break;
default: link_speed = 0; break;
}
+
/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
* as advertised. Disable to ensure packets are not
* indefinitely held and TX queue can be flushed at any point
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
-
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f72489a105ca..aa1b169f45ec 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
*/
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
-
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
@@ -1609,7 +1608,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index af2b8c59a903..8a400a0595eb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
-
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 79226b19e3c4..32d969e857f7 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
-
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index eb75fbd11a01..28275e395cb8 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
- * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
- bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
return -EAGAIN;
}
- /* Convert the NIC time into kernel time. No correction is required-
- * this time is the output of a firmware process.
- */
- mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
- ptp->timeset[last_good].minor, 0);
-
- /* Calculate delay from actual PPS to last_time */
- delta = ktime_to_timespec(mc_time);
- delta.tv_nsec +=
- last_time->ts_real.tv_nsec -
- (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
-
- /* It is possible that the seconds rolled over between taking
+ /* Calculate delay from last good sync (host time) to last_time.
+ * It is possible that the seconds rolled over between taking
* the start reading and the last value written by the host. The
* timescales are such that a gap of more than one second is never
- * expected.
+ * expected. delta is *not* normalised.
*/
start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
- if (start_sec != last_sec) {
- if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
- netif_warn(efx, hw, efx->net_dev,
- "PTP bad synchronisation seconds\n");
- return -EAGAIN;
- } else {
- delta.tv_sec = 1;
- }
- } else {
- delta.tv_sec = 0;
+ if (start_sec != last_sec &&
+ ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP bad synchronisation seconds\n");
+ return -EAGAIN;
}
+ delta.tv_sec = (last_sec - start_sec) & 1;
+ delta.tv_nsec =
+ last_time->ts_real.tv_nsec -
+ (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+ /* Convert the NIC time at last good sync into kernel time.
+ * No correction is required - this time is the output of a
+ * firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
+
+ /* Calculate delay from NIC top of second to last_time */
+ delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+ /* Set PPS timestamp to match NIC top of second */
ptp->host_time_pps = *last_time;
pps_sub_ts(&ptp->host_time_pps, delta);
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
- ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
@@ -1253,7 +1239,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
- ptp->evt_overflow = false;
/* Get the NIC PTP attributes and set up time conversions */
rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1365,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
u8 *match_data_012, *match_data_345;
unsigned int version;
+ u8 *data;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
@@ -1388,7 +1374,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
return false;
}
- version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+ data = skb->data;
+ version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
return false;
}
@@ -1396,13 +1383,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* PTP V1 uses all six bytes of the UUID to match the packet
* to the timestamp
*/
- match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
- match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
+ match_data_012 = data + PTP_V1_UUID_OFFSET;
+ match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
} else {
if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
return false;
}
- version = skb->data[PTP_V2_VERSION_OFFSET];
+ data = skb->data;
+ version = data[PTP_V2_VERSION_OFFSET];
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
return false;
}
@@ -1414,17 +1402,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
* enhanced mode fixes this issue and uses bytes 0-2
* and byte 5-7 of the UUID.
*/
- match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+ match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
if (ptp->mode == MC_CMD_PTP_MODE_V2) {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
} else {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
/* Does this packet require timestamping? */
- if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+ if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
match->state = PTP_PACKET_STATE_UNMATCHED;
/* We expect the sequence number to be in the same position in
@@ -1440,8 +1428,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
(match_data_345[0] << 24));
match->words[1] = (match_data_345[1] |
(match_data_345[2] << 8) |
- (skb->data[PTP_V1_SEQUENCE_OFFSET +
- PTP_V1_SEQUENCE_LENGTH - 1] <<
+ (data[PTP_V1_SEQUENCE_OFFSET +
+ PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
} else {
match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
@@ -1635,13 +1623,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
- } else if (!ptp->evt_overflow) {
- /* Log a warning message and set the event overflow flag.
- * The message won't be logged again until the event queue
- * becomes empty.
- */
+ } else if (net_ratelimit()) {
+ /* Log a rate-limited warning message. */
netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
- ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 75d11fa4eb0a..fa9475300411 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* Requires TX checksum offload support.
*/
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET 0
-#else
-#define TSOH_OFFSET NET_IP_ALIGN
-#endif
-
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
/**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
- if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+ if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
unsigned index =
(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
struct efx_buffer *page_buf =
&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
unsigned offset =
- TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+ TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
if (unlikely(!page_buf->addr) &&
efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
} else {
tx_queue->tso_long_headers++;
- buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+ buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
if (unlikely(!buffer->heap_buf))
return NULL;
- result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+ result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
}
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
static int tso_start(struct tso_state *st, struct efx_nic *efx,
const struct sk_buff *skb)
{
- bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int header_len, in_len;
dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
st->out_len = skb->len - header_len;
- if (!use_options) {
+ if (!use_opt_desc) {
st->header_unmap_len = 0;
if (likely(in_len == 0)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index e2f202e3932f..f2d7c702c77f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -37,6 +37,17 @@ config DWMAC_SUNXI
stmmac device driver. This driver is used for A20/A31
GMAC ethernet controller.
+config DWMAC_STI
+ bool "STi GMAC support"
+ depends on STMMAC_PLATFORM && ARCH_STI
+ default y
+ ---help---
+ Support for the Ethernet controller on STi SoCs.
+
+ This selects the STi SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC Ethernet
+ controller on the STi series of SoCs.
+
config STMMAC_PCI
bool "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ecadecea79b2..dcef28775dad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 000000000000..552bbc17863c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,330 @@
+/**
+ * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+/**
+ * STi GMAC glue logic.
+ * --------------------
+ *
+ * _
+ * | \
+ * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * phyclk | |___________________________________________
+ * | | | (phyclk-in)
+ * --------|1 / |
+ * int-clk |_ / |
+ * | _
+ * | | \
+ * |_______|1 \ ETH_SEL_TX_RETIME_CLK
+ * | |___________________________
+ * | | (tx-retime-clk)
+ * _______|0 /
+ * | |_ /
+ * _ |
+ * | \ |
+ * --------|0 \ |
+ * clk_125 | |__|
+ * | | ETH_SEL_TXCLK_NOT_CLK125
+ * --------|1 /
+ * txclk |_ /
+ *
+ *
+ * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII, where either the
+ * PHY or the MAC can generate the 50MHz clock.
+ * This bit is configured by the "st,ext-phyclk" property.
+ *
+ * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125MHz
+ * clock comes from either the clk-125 pin or the txclk pin. This choice is
+ * driven entirely by the board wiring. This bit is configured by the
+ * "st,tx-retime-src" property.
+ *
+ * The TXCLK configuration differs between phy interface modes and, in
+ * modes such as RGMII, also changes with the link speed.
+ *
+ * The table below summarizes the clock requirements and clock sources for
+ * the supported phy interface modes and link speeds.
+ * ________________________________________________
+ *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link |
+ * ------------------------------------------------
+ *| MII | n/a | 25Mhz |
+ *| | | txclk |
+ * ------------------------------------------------
+ *| GMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | txclk |
+ * ------------------------------------------------
+ *| RGMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | clkgen |
+ * ------------------------------------------------
+ *| RMII | n/a | 25Mhz |
+ *| | |clkgen/phyclk-in |
+ * ------------------------------------------------
+ *
+ * The TX lines are always retimed with a clock, which can vary depending
+ * on the board configuration. The table below lists these bits in the
+ * eth configuration register for each retime clock source.
+ *
+ *---------------------------------------------------------------
+ * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125|
+ *---------------------------------------------------------------
+ * txclk | 0 | n/a | 1 |
+ *---------------------------------------------------------------
+ * ck_125| 0 | n/a | 0 |
+ *---------------------------------------------------------------
+ * phyclk| 1 | 0 | n/a |
+ *---------------------------------------------------------------
+ * clkgen| 1 | 1 | n/a |
+ *---------------------------------------------------------------
+ */
+
+ /* Register definition */
+
+ /* 3 bits [8:6]
+ * [6:6] ETH_SEL_TXCLK_NOT_CLK125
+ * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * [8:8] ETH_SEL_TX_RETIME_CLK
+ *
+ */
+
+#define TX_RETIME_SRC_MASK GENMASK(8, 6)
+#define ETH_SEL_TX_RETIME_CLK BIT(8)
+#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
+#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
+
+#define ENMII_MASK GENMASK(5, 5)
+#define ENMII BIT(5)
+
+/**
+ * 3 bits [4:2]
+ * 000-GMII/MII
+ * 001-RGMII
+ * 010-SGMII
+ * 100-RMII
+*/
+#define MII_PHY_SEL_MASK GENMASK(4, 2)
+#define ETH_PHY_SEL_RMII BIT(4)
+#define ETH_PHY_SEL_SGMII BIT(3)
+#define ETH_PHY_SEL_RGMII BIT(2)
+#define ETH_PHY_SEL_GMII 0x0
+#define ETH_PHY_SEL_MII 0x0
+
+#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
+ iface == PHY_INTERFACE_MODE_RGMII_ID || \
+ iface == PHY_INTERFACE_MODE_RGMII_RXID || \
+ iface == PHY_INTERFACE_MODE_RGMII_TXID)
+
+#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+ iface == PHY_INTERFACE_MODE_GMII)
+
+struct sti_dwmac {
+ int interface;
+ bool ext_phyclk;
+ bool is_tx_retime_src_clk_125;
+ struct clk *clk;
+ int reg;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static u32 phy_intf_sels[] = {
+ [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
+ [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
+ [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
+ [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+};
+
+enum {
+ TX_RETIME_SRC_NA = 0,
+ TX_RETIME_SRC_TXCLK = 1,
+ TX_RETIME_SRC_CLK_125,
+ TX_RETIME_SRC_PHYCLK,
+ TX_RETIME_SRC_CLKGEN,
+};
+
+static const char *const tx_retime_srcs[] = {
+ [TX_RETIME_SRC_NA] = "",
+ [TX_RETIME_SRC_TXCLK] = "txclk",
+ [TX_RETIME_SRC_CLK_125] = "clk_125",
+ [TX_RETIME_SRC_PHYCLK] = "phyclk",
+ [TX_RETIME_SRC_CLKGEN] = "clkgen",
+};
+
+static u32 tx_retime_val[] = {
+ [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
+ [TX_RETIME_SRC_CLK_125] = 0x0,
+ [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
+ [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
+ ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
+};
+
+static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
+{
+ u32 src = 0, freq = 0;
+
+ if (spd == SPEED_100) {
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
+ dwmac->interface == PHY_INTERFACE_MODE_GMII) {
+ src = TX_RETIME_SRC_TXCLK;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ if (dwmac->ext_phyclk) {
+ src = TX_RETIME_SRC_PHYCLK;
+ } else {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 50000000;
+ }
+
+ } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 25000000;
+ }
+
+ if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
+ clk_set_rate(dwmac->clk, freq);
+
+ } else if (spd == SPEED_1000) {
+ if (dwmac->is_tx_retime_src_clk_125)
+ src = TX_RETIME_SRC_CLK_125;
+ else
+ src = TX_RETIME_SRC_TXCLK;
+ }
+
+ regmap_update_bits(dwmac->regmap, dwmac->reg,
+ TX_RETIME_SRC_MASK, tx_retime_val[src]);
+}
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ if (dwmac->clk)
+ clk_disable_unprepare(dwmac->clk);
+}
+
+static void sti_fix_mac_speed(void *priv, unsigned int spd)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ setup_retime_src(dwmac, spd);
+
+ return;
+}
+
+static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ int err;
+
+ if (!np)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
+ if (!res)
+ return -ENODATA;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ dwmac->dev = dev;
+ dwmac->interface = of_get_phy_mode(np);
+ dwmac->regmap = regmap;
+ dwmac->reg = res->start;
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+ dwmac->is_tx_retime_src_clk_125 = false;
+
+ if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
+ const char *rs;
+
+ err = of_property_read_string(np, "st,tx-retime-src", &rs);
+ if (err < 0) {
+ dev_err(dev, "st,tx-retime-src not specified\n");
+ return err;
+ }
+
+ if (!strcasecmp(rs, "clk_125"))
+ dwmac->is_tx_retime_src_clk_125 = true;
+ }
+
+ dwmac->clk = devm_clk_get(dev, "sti-ethclk");
+
+ if (IS_ERR(dwmac->clk))
+ dwmac->clk = NULL;
+
+ return 0;
+}
+
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+ struct regmap *regmap = dwmac->regmap;
+ int iface = dwmac->interface;
+ u32 reg = dwmac->reg;
+ u32 val, spd;
+
+ if (dwmac->clk)
+ clk_prepare_enable(dwmac->clk);
+
+ regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+
+ val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+ regmap_update_bits(regmap, reg, ENMII_MASK, val);
+
+ if (IS_PHY_IF_MODE_GBIT(iface))
+ spd = SPEED_1000;
+ else
+ spd = SPEED_100;
+
+ setup_retime_src(dwmac, spd);
+
+ return 0;
+}
+
+static void *sti_dwmac_setup(struct platform_device *pdev)
+{
+ struct sti_dwmac *dwmac;
+ int ret;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sti_dwmac_parse_data(dwmac, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return ERR_PTR(ret);
+ }
+
+ return dwmac;
+}
+
+const struct stmmac_of_data sti_gmac_data = {
+ .fix_mac_speed = sti_fix_mac_speed,
+ .setup = sti_dwmac_setup,
+ .init = sti_dwmac_init,
+ .exit = sti_dwmac_exit,
+};
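Note: the tx_retime_val[] table above packs the three retime-clock selection bits described in the header comment of dwmac-sti.c. Below is a minimal user-space sketch, not part of the patch, that reproduces that encoding from just the bit positions given in the comment; everything outside those defines is illustrative.

/* Standalone sketch: reproduces the [8:6] retime-clock bit encoding from the
 * dwmac-sti.c comment so the table can be checked outside the kernel.
 */
#include <stdio.h>

#define BIT(n)                          (1u << (n))
#define ETH_SEL_TX_RETIME_CLK           BIT(8)
#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK  BIT(7)
#define ETH_SEL_TXCLK_NOT_CLK125        BIT(6)

enum { SRC_TXCLK, SRC_CLK_125, SRC_PHYCLK, SRC_CLKGEN };

static const char *const names[] = { "txclk", "clk_125", "phyclk", "clkgen" };

static const unsigned int retime_val[] = {
	[SRC_TXCLK]   = ETH_SEL_TXCLK_NOT_CLK125,
	[SRC_CLK_125] = 0x0,
	[SRC_PHYCLK]  = ETH_SEL_TX_RETIME_CLK,
	[SRC_CLKGEN]  = ETH_SEL_TX_RETIME_CLK | ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};

int main(void)
{
	/* Print the value written into bits [8:6] of the syscon register
	 * for each retime source, matching the table in the driver comment.
	 */
	for (int i = 0; i < 4; i++)
		printf("%-8s -> bits [8:6] = 0x%03x\n", names[i], retime_val[i]);
	return 0;
}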
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d9af26ed58ee..f9e60d7918c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_DWMAC_SUNXI
extern const struct stmmac_of_data sun7i_gmac_data;
#endif
+#ifdef CONFIG_DWMAC_STI
+extern const struct stmmac_of_data sti_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5884a7d2063b..c61bc72b8e90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,11 @@ static const struct of_device_id stmmac_dt_ids[] = {
#ifdef CONFIG_DWMAC_SUNXI
{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
#endif
+#ifdef CONFIG_DWMAC_STI
+ { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
+#endif
/* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8e2266e1f260..79606f47a08e 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
struct msix_entry msi_vec[NIU_NUM_LDG];
struct niu_parent *parent = np->parent;
struct pci_dev *pdev = np->pdev;
- int i, num_irqs, err;
+ int i, num_irqs;
u8 first_ldg;
first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
@@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
(np->port == 0 ? 3 : 1));
BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
-retry:
for (i = 0; i < num_irqs; i++) {
msi_vec[i].vector = 0;
msi_vec[i].entry = i;
}
- err = pci_enable_msix(pdev, msi_vec, num_irqs);
- if (err < 0) {
+ num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
+ if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
return;
}
- if (err > 0) {
- num_irqs = err;
- goto retry;
- }
np->flags |= NIU_FLAGS_MSIX;
for (i = 0; i < num_irqs; i++)
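Note: the niu change is one instance of the conversion from pci_enable_msix(), whose positive return value meant "retry with this many vectors" and forced a retry loop, to pci_enable_msix_range(), which retries internally and returns either the number of vectors actually allocated (between minvec and maxvec) or a negative error. A minimal sketch of the calling pattern follows; struct foo and its fields are hypothetical, only the PCI API calls are real.

/* Sketch of the pci_enable_msix_range() calling pattern used above.
 * "struct foo" is hypothetical.
 */
#include <linux/pci.h>

struct foo {
	struct pci_dev *pdev;
	struct msix_entry vec[8];
	int num_irqs;
};

static int foo_setup_msix(struct foo *fp, int wanted)
{
	int i, rc;

	for (i = 0; i < wanted; i++) {
		fp->vec[i].entry = i;
		fp->vec[i].vector = 0;
	}

	/* Ask for up to "wanted" vectors but accept as few as one;
	 * on success the return value is the count actually granted.
	 */
	rc = pci_enable_msix_range(fp->pdev, fp->vec, 1, wanted);
	if (rc < 0)
		return rc;	/* caller falls back to MSI or INTx */

	fp->num_irqs = rc;
	return 0;
}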
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1d860ce914ed..0b6a2802d51b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
* common for both the interface as the interface shares
* the same hardware resource.
*/
- for (i = 0; i <= priv->data.slaves; i++)
+ for (i = 0; i < priv->data.slaves; i++)
if (priv->slaves[i].ndev->flags & IFF_PROMISC)
flag = true;
@@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
unsigned long timeout = jiffies + HZ;
/* Disable Learn for all ports */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i < priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 1);
cpsw_ale_control_set(ale, i,
@@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
/* Enable Learn for all ports */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i < priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 0);
cpsw_ale_control_set(ale, i,
@@ -1471,7 +1471,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(req);
int slave_no = cpsw_slave_index(priv);
if (!netif_running(dev))
@@ -1484,14 +1483,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
case SIOCGHWTSTAMP:
return cpsw_hwtstamp_get(dev, req);
#endif
- case SIOCGMIIPHY:
- data->phy_id = priv->slaves[slave_no].phy->addr;
- break;
- default:
- return -ENOTSUPP;
}
- return 0;
+ if (!priv->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1896,6 +1892,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
slave_data->phy_if = of_get_phy_mode(slave_node);
+ if (slave_data->phy_if < 0) {
+ pr_err("Missing or malformed slave[%d] phy-mode property\n",
+ i);
+ return slave_data->phy_if;
+ }
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 023237a65720..17503da9f7a5 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Return subqueue id on this core (one per core). */
static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return smp_processor_id();
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1ec65feebb9e..4bfdf8c7ada0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
+ ++lp->tx_bd_ci;
+ lp->tx_bd_ci %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
@@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
return NETDEV_TX_OK;
}
@@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->sw_id_offset = (u32) new_skb;
- lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
+ ++lp->rx_bd_ci;
+ lp->rx_bd_ci %= RX_BD_NUM;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
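Note: the axienet hunks split statements of the form "lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;" because they modify the same variable twice without an intervening sequence point, which is undefined behaviour in C (gcc reports it with -Wsequence-point). A small standalone example of the well-defined replacement pattern:

/* Ring-index wrap-around, written the way the patch rewrites it. */
#include <stdio.h>

#define RING_SIZE 4

int main(void)
{
	unsigned int idx = RING_SIZE - 1;

	/* Undefined behaviour (the old form):
	 *   idx = ++idx % RING_SIZE;
	 */

	/* Well-defined replacement, as in the patch: */
	++idx;
	idx %= RING_SIZE;

	printf("wrapped index = %u\n", idx);	/* prints 0 */
	return 0;
}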
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 7b594ce3f21d..39fc230f5c20 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -139,6 +139,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
#define NVSP_PROTOCOL_VERSION_1 2
#define NVSP_PROTOCOL_VERSION_2 0x30002
+#define NVSP_PROTOCOL_VERSION_4 0x40000
+#define NVSP_PROTOCOL_VERSION_5 0x50000
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -193,6 +195,23 @@ enum {
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ /* Version 4 messages */
+ NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
+ NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
+ NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ /* Version 5 messages */
+ NVSP_MSG5_TYPE_OID_QUERY_EX,
+ NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
+ NVSP_MSG5_TYPE_SUBCHANNEL,
+ NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+ NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
};
enum {
@@ -447,10 +466,44 @@ union nvsp_2_message_uber {
struct nvsp_2_free_rxbuf free_rxbuf;
} __packed;
+enum nvsp_subchannel_operation {
+ NVSP_SUBCHANNEL_NONE = 0,
+ NVSP_SUBCHANNEL_ALLOCATE,
+ NVSP_SUBCHANNEL_MAX
+};
+
+struct nvsp_5_subchannel_request {
+ u32 op;
+ u32 num_subchannels;
+} __packed;
+
+struct nvsp_5_subchannel_complete {
+ u32 status;
+ u32 num_subchannels; /* Actual number of subchannels allocated */
+} __packed;
+
+struct nvsp_5_send_indirect_table {
+ /* The number of entries in the send indirection table */
+ u32 count;
+
+ /* The offset of the send indirection table from the top of this struct.
+ * The send indirection table tells which channel to put the send
+ * traffic on. Each entry is a channel number.
+ */
+ u32 offset;
+} __packed;
+
+union nvsp_5_message_uber {
+ struct nvsp_5_subchannel_request subchn_req;
+ struct nvsp_5_subchannel_complete subchn_comp;
+ struct nvsp_5_send_indirect_table send_table;
+} __packed;
+
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
union nvsp_2_message_uber v2_msg;
+ union nvsp_5_message_uber v5_msg;
} __packed;
/* ALL Messages */
@@ -506,6 +559,8 @@ struct netvsc_device {
/* Holds rndis device info */
void *extension;
+ /* The receive buffer for this device */
+ unsigned char cb_buffer[NETVSC_PACKET_SIZE];
};
/* NdisInitialize message */
@@ -846,12 +901,6 @@ struct rndis_message {
};
-struct rndis_filter_packet {
- void *completion_ctx;
- void (*completion)(void *context);
- struct rndis_message msg;
-};
-
/* Handy macros */
/* get the size of an RNDIS message. Pass in the message type, */
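Note: the new nvsp_5_send_indirect_table describes a table of channel numbers located "offset" bytes from the start of the structure, one entry per table slot. Below is a user-space sketch of how such a table could be walked, assuming 32-bit entries; the two-field layout is copied from the header above, the rest is illustrative and not taken from the driver.

/* Sketch: walk an NVSP v5 send indirection table, assuming "offset" is
 * counted from the start of the structure and each entry is a u32 channel.
 */
#include <stdint.h>
#include <stdio.h>

struct nvsp_5_send_indirect_table {
	uint32_t count;		/* number of entries */
	uint32_t offset;	/* byte offset of the first entry */
};

static void dump_send_table(const struct nvsp_5_send_indirect_table *tbl)
{
	const uint32_t *entry =
		(const uint32_t *)((const uint8_t *)tbl + tbl->offset);
	uint32_t i;

	for (i = 0; i < tbl->count; i++)
		printf("entry %u -> channel %u\n", i, entry[i]);
}

int main(void)
{
	/* Fake message: header followed immediately by two entries. */
	struct {
		struct nvsp_5_send_indirect_table hdr;
		uint32_t entries[2];
	} msg = { { 2, sizeof(struct nvsp_5_send_indirect_table) }, { 0, 1 } };

	dump_send_table(&msg.hdr);
	return 0;
}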
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 03a2c6e17158..1a0280dcba7e 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -290,7 +290,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
NVSP_STAT_SUCCESS)
return -EINVAL;
- if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+ if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
return 0;
/* NVSPv2 only: Send NDIS config */
@@ -314,6 +314,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct nvsp_message *init_packet;
int ndis_version;
struct net_device *ndev;
+ u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+ NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
+ int i, num_ver = 4; /* number of different NVSP versions */
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -323,13 +326,14 @@ static int netvsc_connect_vsp(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
- if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_2) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
- } else if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_1) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
- } else {
+ for (i = num_ver - 1; i >= 0; i--)
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ ver_list[i]) == 0) {
+ net_device->nvsp_version = ver_list[i];
+ break;
+ }
+
+ if (i < 0) {
ret = -EPROTO;
goto cleanup;
}
@@ -339,7 +343,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
- ndis_version = 0x00050001;
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+ ndis_version = 0x00050001;
+ else
+ ndis_version = 0x0006001e;
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
init_packet->msg.v1_msg.
@@ -432,17 +439,14 @@ static inline u32 hv_ringbuf_avail_percent(
return avail_write * 100 / ring_info->ring_datasize;
}
-static void netvsc_send_completion(struct hv_device *device,
+static void netvsc_send_completion(struct netvsc_device *net_device,
+ struct hv_device *device,
struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
@@ -561,13 +565,13 @@ int netvsc_send(struct hv_device *device,
}
static void netvsc_send_recv_completion(struct hv_device *device,
+ struct netvsc_device *net_device,
u64 transaction_id, u32 status)
{
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
struct net_device *ndev;
- struct netvsc_device *net_device = hv_get_drvdata(device);
ndev = net_device->ndev;
@@ -653,14 +657,15 @@ static void netvsc_receive_completion(void *context)
/* Send a receive completion for the xfer page packet */
if (fsend_receive_comp)
- netvsc_send_recv_completion(device, transaction_id, status);
+ netvsc_send_recv_completion(device, net_device, transaction_id,
+ status);
}
-static void netvsc_receive(struct hv_device *device,
- struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct netvsc_device *net_device,
+ struct hv_device *device,
+ struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
@@ -673,9 +678,6 @@ static void netvsc_receive(struct hv_device *device,
LIST_HEAD(listHead);
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
/*
@@ -741,7 +743,7 @@ static void netvsc_receive(struct hv_device *device,
spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
flags);
- netvsc_send_recv_completion(device,
+ netvsc_send_recv_completion(device, net_device,
vmxferpage_packet->d.trans_id,
NVSP_STAT_FAIL);
@@ -800,22 +802,16 @@ static void netvsc_channel_cb(void *context)
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
- unsigned char *packet;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
- packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
- GFP_ATOMIC);
- if (!packet)
- return;
- buffer = packet;
-
net_device = get_inbound_net_device(device);
if (!net_device)
- goto out;
+ return;
ndev = net_device->ndev;
+ buffer = net_device->cb_buffer;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -825,11 +821,13 @@ static void netvsc_channel_cb(void *context)
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(device, desc);
+ netvsc_send_completion(net_device,
+ device, desc);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(device, desc);
+ netvsc_receive(net_device,
+ device, desc);
break;
default:
@@ -841,23 +839,16 @@ static void netvsc_channel_cb(void *context)
break;
}
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
} else {
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
-
+ /*
+ * We are done for this pass.
+ */
break;
}
+
} else if (ret == -ENOBUFS) {
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
/* Handle large packet */
buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
@@ -872,8 +863,8 @@ static void netvsc_channel_cb(void *context)
}
} while (1);
-out:
- kfree(buffer);
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
return;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7756118c2f0a..9ef6be90a81c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
int ret = 0;
+ netif_carrier_off(net);
+
/* Open up the device */
ret = rndis_filter_open(device_obj);
if (ret != 0) {
@@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net)
netif_start_queue(net);
+ nvdev = hv_get_drvdata(device_obj);
+ rdev = nvdev->extension;
+ if (!rdev->link_state)
+ netif_carrier_on(net);
+
return ret;
}
@@ -146,7 +155,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer)) +
- sizeof(struct rndis_filter_packet) +
+ sizeof(struct rndis_message) +
NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
@@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct net_device *net;
struct net_device_context *ndev_ctx;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
net_device = hv_get_drvdata(device_obj);
+ rdev = net_device->extension;
+
+ rdev->link_state = status != 1;
+
net = net_device->ndev;
- if (!net) {
- netdev_err(net, "got link status but net device "
- "not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED)
return;
- }
+ ndev_ctx = netdev_priv(net);
if (status == 1) {
- netif_carrier_on(net);
- ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
- netif_carrier_off(net);
+ schedule_delayed_work(&ndev_ctx->dwork, 0);
}
}
@@ -317,7 +327,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (nvdev == NULL || nvdev->destroy)
return -ENODEV;
- if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
limit = NETVSC_MTU;
if (mtu < 68 || mtu > limit)
@@ -388,17 +398,35 @@ static const struct net_device_ops device_ops = {
* current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
* another netif_notify_peers() into a delayed work, otherwise GARP packet
* will not be sent after quick migration, and cause network disconnection.
+ * Also, we update the carrier status here.
*/
-static void netvsc_send_garp(struct work_struct *w)
+static void netvsc_link_change(struct work_struct *w)
{
struct net_device_context *ndev_ctx;
struct net_device *net;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
+ bool notify;
+
+ rtnl_lock();
ndev_ctx = container_of(w, struct net_device_context, dwork.work);
net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+ rdev = net_device->extension;
net = net_device->ndev;
- netdev_notify_peers(net);
+
+ if (rdev->link_state) {
+ netif_carrier_off(net);
+ notify = false;
+ } else {
+ netif_carrier_on(net);
+ notify = true;
+ }
+
+ rtnl_unlock();
+
+ if (notify)
+ netdev_notify_peers(net);
}
@@ -414,13 +442,10 @@ static int netvsc_probe(struct hv_device *dev,
if (!net)
return -ENOMEM;
- /* Set initial state */
- netif_carrier_off(net);
-
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
hv_set_drvdata(dev, net);
- INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+ INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
net->netdev_ops = &device_ops;
@@ -443,8 +468,6 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- netif_carrier_on(net);
-
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1084e5de3ceb..f0cc8ef21e1c 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -58,9 +58,6 @@ struct rndis_request {
u8 request_ext[RNDIS_EXT_LEN];
};
-static void rndis_filter_send_completion(void *ctx);
-
-
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
@@ -277,7 +274,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
"rndis response buffer overflow "
"detected (size %u max %zu)\n",
resp->msg_len,
- sizeof(struct rndis_filter_packet));
+ sizeof(struct rndis_message));
if (resp->ndis_msg_type ==
RNDIS_MSG_RESET_C) {
@@ -898,17 +895,14 @@ int rndis_filter_close(struct hv_device *dev)
int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt)
{
- int ret;
- struct rndis_filter_packet *filter_pkt;
struct rndis_message *rndis_msg;
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
/* Add the rndis header */
- filter_pkt = (struct rndis_filter_packet *)pkt->extension;
+ rndis_msg = (struct rndis_message *)pkt->extension;
- rndis_msg = &filter_pkt->msg;
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
if (isvlan)
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
@@ -961,34 +955,5 @@ int rndis_filter_send(struct hv_device *dev,
pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
}
- /* Save the packet send completion and context */
- filter_pkt->completion = pkt->completion.send.send_completion;
- filter_pkt->completion_ctx =
- pkt->completion.send.send_completion_ctx;
-
- /* Use ours */
- pkt->completion.send.send_completion = rndis_filter_send_completion;
- pkt->completion.send.send_completion_ctx = filter_pkt;
-
- ret = netvsc_send(dev, pkt);
- if (ret != 0) {
- /*
- * Reset the completion to originals to allow retries from
- * above
- */
- pkt->completion.send.send_completion =
- filter_pkt->completion;
- pkt->completion.send.send_completion_ctx =
- filter_pkt->completion_ctx;
- }
-
- return ret;
-}
-
-static void rndis_filter_send_completion(void *ctx)
-{
- struct rndis_filter_packet *filter_pkt = ctx;
-
- /* Pass it back to the original handler */
- filter_pkt->completion(filter_pkt->completion_ctx);
+ return netvsc_send(dev, pkt);
}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ab31544bc254..34bf011584fb 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -37,7 +37,6 @@
struct at86rf230_local {
struct spi_device *spi;
- int rstn, slp_tr, dig2;
u8 part;
u8 vers;
@@ -53,8 +52,16 @@ struct at86rf230_local {
spinlock_t lock;
bool irq_busy;
bool is_tx;
+ bool tx_aret;
+
+ int rssi_base_val;
};
+static inline int is_rf212(struct at86rf230_local *local)
+{
+ return local->part == 7;
+}
+
#define RG_TRX_STATUS (0x01)
#define SR_TRX_STATUS 0x01, 0x1f, 0
#define SR_RESERVED_01_3 0x01, 0x20, 5
@@ -100,7 +107,10 @@ struct at86rf230_local {
#define SR_SFD_VALUE 0x0b, 0xff, 0
#define RG_TRX_CTRL_2 (0x0c)
#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
#define RG_ANT_DIV (0x0d)
#define SR_ANT_CTRL 0x0d, 0x03, 0
@@ -145,7 +155,7 @@ struct at86rf230_local {
#define SR_RESERVED_17_5 0x17, 0x08, 3
#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
#define SR_RESERVED_17_1 0x17, 0x80, 7
#define RG_FTN_CTRL (0x18)
#define SR_RESERVED_18_2 0x18, 0x7f, 0
@@ -244,6 +254,57 @@ struct at86rf230_local {
#define STATE_TRANSITION_IN_PROGRESS 0x1F
static int
+__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
+ u8 *version)
+{
+ u8 data[4];
+ u8 *buf = kmalloc(2, GFP_KERNEL);
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ u8 reg;
+
+ if (!buf)
+ return -ENOMEM;
+
+ for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
+ buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(spi, &msg);
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ data[reg - RG_PART_NUM] = buf[1];
+ else
+ break;
+ }
+
+ if (status == 0) {
+ *part = data[0];
+ *version = data[1];
+ *man_id = (data[3] << 8) | data[2];
+ }
+
+ kfree(buf);
+
+ return status;
+}
+
+static int
__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
{
u8 *buf = lp->buf;
@@ -489,7 +550,9 @@ at86rf230_state(struct ieee802154_dev *dev, int state)
} while (val == STATE_TRANSITION_IN_PROGRESS);
- if (val == desired_status)
+ if (val == desired_status ||
+ (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
+ (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
return 0;
pr_err("unexpected state change: %d, asked for %d\n", val, state);
@@ -510,7 +573,11 @@ at86rf230_start(struct ieee802154_dev *dev)
if (rc)
return rc;
- return at86rf230_state(dev, STATE_RX_ON);
+ rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_AACK_ON);
}
static void
@@ -520,6 +587,39 @@ at86rf230_stop(struct ieee802154_dev *dev)
}
static int
+at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ lp->rssi_base_val = -91;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
+at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ int rc;
+
+ if (channel == 0)
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 0);
+ else
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 1);
+ if (rc < 0)
+ return rc;
+
+ if (page == 0) {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
+ lp->rssi_base_val = -100;
+ } else {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
+ lp->rssi_base_val = -98;
+ }
+ if (rc < 0)
+ return rc;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
{
struct at86rf230_local *lp = dev->priv;
@@ -527,14 +627,22 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
might_sleep();
- if (page != 0 || channel < 11 || channel > 26) {
+ if (page < 0 || page > 31 ||
+ !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
WARN_ON(1);
return -EINVAL;
}
- rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ if (is_rf212(lp))
+ rc = at86rf212_set_channel(lp, page, channel);
+ else
+ rc = at86rf230_set_channel(lp, page, channel);
+ if (rc < 0)
+ return rc;
+
msleep(1); /* Wait for PLL */
dev->phy->current_channel = channel;
+ dev->phy->current_page = page;
return 0;
}
@@ -568,6 +676,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (rc)
goto err_rx;
+ if (lp->tx_aret) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
+ if (rc)
+ goto err_rx;
+ }
+
rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
if (rc)
goto err_rx;
@@ -668,6 +782,98 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
return 0;
}
+static int
+at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
+ * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
+ * 0dB.
+ * thus, supported values for db range from -26 to 5, for 31dB of
+ * reduction to 0dB of reduction.
+ */
+ if (db > 5 || db < -26)
+ return -EINVAL;
+
+ db = -(db - 5);
+
+ rc = __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
+}
+
+static int
+at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+}
+
+static int
+at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int desens_steps;
+
+ if (level < lp->rssi_base_val || level > 30)
+ return -EINVAL;
+
+ desens_steps = (level - lp->rssi_base_val) * 100 / 207;
+
+ return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
+}
+
+static int
+at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+ u8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ if (min_be > max_be || max_be > 8 || retries > 5)
+ return -EINVAL;
+
+ rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
+ if (rc)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_MAX_BE, max_be);
+ if (rc)
+ return rc;
+
+ return at86rf230_write_subreg(lp, SR_MAX_CSMA_RETRIES, retries);
+}
+
+static int
+at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc = 0;
+
+ if (retries < -1 || retries > 15)
+ return -EINVAL;
+
+ lp->tx_aret = retries >= 0;
+
+ if (retries >= 0)
+ rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
+
+ return rc;
+}
+
static struct ieee802154_ops at86rf230_ops = {
.owner = THIS_MODULE,
.xmit = at86rf230_xmit,
@@ -678,6 +884,22 @@ static struct ieee802154_ops at86rf230_ops = {
.set_hw_addr_filt = at86rf230_set_hw_addr_filt,
};
+static struct ieee802154_ops at86rf212_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+ .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
+ .set_txpower = at86rf212_set_txpower,
+ .set_lbt = at86rf212_set_lbt,
+ .set_cca_mode = at86rf212_set_cca_mode,
+ .set_cca_ed_level = at86rf212_set_cca_ed_level,
+ .set_csma_params = at86rf212_set_csma_params,
+ .set_frame_retries = at86rf212_set_frame_retries,
+};
+
static void at86rf230_irqwork(struct work_struct *work)
{
struct at86rf230_local *lp =
@@ -752,22 +974,15 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
int rc, irq_pol;
u8 status;
+ u8 csma_seed[2];
rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
if (rc)
return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- if (status == STATE_P_ON) {
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
- if (rc)
- return rc;
- msleep(1);
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- }
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
+ if (rc)
+ return rc;
/* configure irq polarity, defaults to high active */
if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
@@ -783,6 +998,14 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
if (rc)
return rc;
+ get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
+ if (rc)
+ return rc;
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]);
+ if (rc)
+ return rc;
+
/* CLKM changes are applied immediately */
rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
if (rc)
@@ -795,16 +1018,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
/* Wait the next SLEEP cycle */
msleep(100);
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
- if (rc)
- return rc;
- msleep(1);
-
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
-
rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
if (rc)
return rc;
@@ -824,26 +1037,18 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return 0;
}
-static void at86rf230_fill_data(struct spi_device *spi)
-{
- struct at86rf230_local *lp = spi_get_drvdata(spi);
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
-
- lp->rstn = pdata->rstn;
- lp->slp_tr = pdata->slp_tr;
- lp->dig2 = pdata->dig2;
-}
-
static int at86rf230_probe(struct spi_device *spi)
{
struct at86rf230_platform_data *pdata;
struct ieee802154_dev *dev;
struct at86rf230_local *lp;
- u8 man_id_0, man_id_1, status;
+ u16 man_id = 0;
+ u8 part = 0, version = 0, status;
irq_handler_t irq_handler;
work_func_t irq_worker;
- int rc, supported = 0;
+ int rc;
const char *chip;
+ struct ieee802154_ops *ops = NULL;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
@@ -856,116 +1061,117 @@ static int at86rf230_probe(struct spi_device *spi)
return -EINVAL;
}
- dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
- if (!dev)
- return -ENOMEM;
-
- lp = dev->priv;
- lp->dev = dev;
-
- lp->spi = spi;
-
- dev->parent = &spi->dev;
- dev->extra_tx_headroom = 0;
- /* We do support only 2.4 Ghz */
- dev->phy->channels_supported[0] = 0x7FFF800;
- dev->flags = IEEE802154_HW_OMIT_CKSUM;
-
- if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- irq_worker = at86rf230_irqwork;
- irq_handler = at86rf230_isr;
- } else {
- irq_worker = at86rf230_irqwork_level;
- irq_handler = at86rf230_isr_level;
- }
-
- mutex_init(&lp->bmux);
- INIT_WORK(&lp->irqwork, irq_worker);
- spin_lock_init(&lp->lock);
- init_completion(&lp->tx_complete);
-
- spi_set_drvdata(spi, lp);
-
- at86rf230_fill_data(spi);
-
- rc = gpio_request(lp->rstn, "rstn");
+ rc = gpio_request(pdata->rstn, "rstn");
if (rc)
- goto err_rstn;
+ return rc;
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_request(pdata->slp_tr, "slp_tr");
if (rc)
goto err_slp_tr;
}
- rc = gpio_direction_output(lp->rstn, 1);
+ rc = gpio_direction_output(pdata->rstn, 1);
if (rc)
goto err_gpio_dir;
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_direction_output(lp->slp_tr, 0);
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_direction_output(pdata->slp_tr, 0);
if (rc)
goto err_gpio_dir;
}
/* Reset */
msleep(1);
- gpio_set_value(lp->rstn, 0);
+ gpio_set_value(pdata->rstn, 0);
msleep(1);
- gpio_set_value(lp->rstn, 1);
+ gpio_set_value(pdata->rstn, 1);
msleep(1);
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
- if (rc)
- goto err_gpio_dir;
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
- if (rc)
+ rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
+ if (rc < 0)
goto err_gpio_dir;
- if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ if (man_id != 0x001f) {
dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
- man_id_1, man_id_0);
+ man_id >> 8, man_id & 0xFF);
rc = -EINVAL;
goto err_gpio_dir;
}
- rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
- if (rc)
- goto err_gpio_dir;
-
- rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
- if (rc)
- goto err_gpio_dir;
-
- switch (lp->part) {
+ switch (part) {
case 2:
chip = "at86rf230";
- /* supported = 1; FIXME: should be easy to support; */
+ /* FIXME: should be easy to support; */
break;
case 3:
chip = "at86rf231";
- supported = 1;
+ ops = &at86rf230_ops;
+ break;
+ case 7:
+ chip = "at86rf212";
+ if (version == 1)
+ ops = &at86rf212_ops;
break;
default:
chip = "UNKNOWN";
break;
}
- dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
- if (!supported) {
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
+ if (!ops) {
rc = -ENOTSUPP;
goto err_gpio_dir;
}
+ dev = ieee802154_alloc_device(sizeof(*lp), ops);
+ if (!dev) {
+ rc = -ENOMEM;
+ goto err_gpio_dir;
+ }
+
+ lp = dev->priv;
+ lp->dev = dev;
+ lp->part = part;
+ lp->vers = version;
+
+ lp->spi = spi;
+
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+
+ if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ irq_worker = at86rf230_irqwork;
+ irq_handler = at86rf230_isr;
+ } else {
+ irq_worker = at86rf230_irqwork_level;
+ irq_handler = at86rf230_isr_level;
+ }
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, irq_worker);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
+ if (is_rf212(lp)) {
+ dev->phy->channels_supported[0] = 0x00007FF;
+ dev->phy->channels_supported[2] = 0x00007FF;
+ } else {
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ }
+
rc = at86rf230_hw_init(lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
rc = request_irq(spi->irq, irq_handler,
IRQF_SHARED | pdata->irq_type,
dev_name(&spi->dev), lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
/* Read irq status register to reset irq line */
rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
@@ -980,30 +1186,33 @@ static int at86rf230_probe(struct spi_device *spi)
err_irq:
free_irq(spi->irq, lp);
+err_hw_init:
flush_work(&lp->irqwork);
-err_gpio_dir:
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
-err_slp_tr:
- gpio_free(lp->rstn);
-err_rstn:
+ spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
+
+err_gpio_dir:
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+err_slp_tr:
+ gpio_free(pdata->rstn);
return rc;
}
static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
ieee802154_unregister_device(lp->dev);
free_irq(spi->irq, lp);
flush_work(&lp->irqwork);
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
- gpio_free(lp->rstn);
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+ gpio_free(pdata->rstn);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
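Note: in at86rf212_set_cca_ed_level() above, the requested energy-detection threshold in dBm is converted into register steps of roughly 2.07 dB above the page-dependent RSSI base value. A small worked example of that arithmetic, reusing the base values set in at86rf212_set_channel(); the helper name is only for illustration.

/* Worked example of the (level - rssi_base_val) * 100 / 207 conversion. */
#include <stdio.h>

static int ed_level_to_steps(int level, int rssi_base_val)
{
	return (level - rssi_base_val) * 100 / 207;
}

int main(void)
{
	/* BPSK page: rssi_base_val = -100 dBm */
	printf("-70 dBm, BPSK page   -> %d steps\n", ed_level_to_steps(-70, -100));
	/* O-QPSK page: rssi_base_val = -98 dBm */
	printf("-70 dBm, O-QPSK page -> %d steps\n", ed_level_to_steps(-70, -98));
	return 0;
}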
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 177441afeb96..24b6dddd7f2f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty)
sirdev_put_instance(priv->dev);
/* Stop tty */
- irtty_stop_receiver(tty, TRUE);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (tty->ops->stop)
tty->ops->stop(tty);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c5011e078e1b..771c9bfa7d31 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -136,16 +136,9 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- int i;
- dev->lstats = alloc_percpu(struct pcpu_lstats);
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *lb_stats;
- lb_stats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&lb_stats->syncp);
- }
return 0;
}
@@ -160,6 +153,7 @@ static const struct net_device_ops loopback_ops = {
.ndo_init = loopback_dev_init,
.ndo_start_xmit= loopback_xmit,
.ndo_get_stats64 = loopback_get_stats64,
+ .ndo_set_mac_address = eth_mac_addr,
};
/*
@@ -174,6 +168,7 @@ static void loopback_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8433de4509c7..44227c25a276 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -534,7 +534,6 @@ static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
const struct net_device *lowerdev = vlan->lowerdev;
- int i;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
@@ -546,16 +545,10 @@ static int macvlan_init(struct net_device *dev)
macvlan_set_lockdep_class(dev);
- vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct vlan_pcpu_stats *mvlstats;
- mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
- u64_stats_init(&mvlstats->syncp);
- }
-
return 0;
}
@@ -879,14 +872,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
if (err)
- goto destroy_port;
-
+ goto unregister_netdev;
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
+unregister_netdev:
+ unregister_netdevice(dev);
destroy_port:
port->count -= 1;
if (!port->count)
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index d2bb12bfabd5..14ce7de6a933 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -47,16 +47,7 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
static int nlmon_dev_init(struct net_device *dev)
{
- int i;
-
- dev->lstats = alloc_percpu(struct pcpu_lstats);
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *nlmstats;
- nlmstats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&nlmstats->syncp);
- }
-
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
return dev->lstats == NULL ? -ENOMEM : 0;
}
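Note: the loopback, macvlan and nlmon hunks replace the open-coded per-CPU stats allocation with the netdev_alloc_pcpu_stats() helper. For reference, the pattern being removed, an alloc_percpu() followed by u64_stats_init() on every possible CPU, looks like this when written as an ordinary function; this is a sketch, the real helper is a macro in include/linux/netdevice.h.

/* Sketch of the per-CPU stats allocation pattern the helper replaces. */
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

static struct pcpu_lstats __percpu *example_alloc_lstats(void)
{
	struct pcpu_lstats __percpu *lstats = alloc_percpu(struct pcpu_lstats);
	int cpu;

	if (!lstats)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct pcpu_lstats *stat = per_cpu_ptr(lstats, cpu);

		u64_stats_init(&stat->syncp);
	}

	return lstats;
}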
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9b5d46c03eed..6a17f92153b3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -71,6 +71,12 @@ config BCM63XX_PHY
---help---
Currently supports the 6348 and 6358 PHYs.
+config BCM7XXX_PHY
+ tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+ ---help---
+ Currently supports the BCM7366, BCM7439, BCM7445, and
+ 40nm and 65nm generations of BCM7xxx Set Top Box SoCs.
+
config BCM87XX_PHY
tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 9013dfa12aa3..07d24024863e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
new file mode 100644
index 000000000000..697337220016
--- /dev/null
+++ b/drivers/net/phy/bcm7xxx.c
@@ -0,0 +1,343 @@
+/*
+ * Broadcom BCM7xxx internal transceivers support.
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/brcmphy.h>
+
+/* Broadcom BCM7xxx internal PHY registers */
+#define MII_BCM7XXX_CHANNEL_WIDTH 0x2000
+
+/* 40nm only register definitions */
+#define MII_BCM7XXX_100TX_AUX_CTL 0x10
+#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
+#define MII_BCM7XXX_100TX_DISC 0x14
+#define MII_BCM7XXX_AUX_MODE 0x1d
+#define MII_BCM7XX_64CLK_MDIO BIT(12)
+#define MII_BCM7XXX_CORE_BASE1E 0x1e
+#define MII_BCM7XXX_TEST 0x1f
+#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
+
+static int bcm7445_config_init(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7445_regs {
+ int reg;
+ u16 value;
+ } bcm7445_regs_cfg[] = {
+ /* increases ADC latency by 24ns */
+ { MII_BCM54XX_EXP_SEL, 0x0038 },
+ { MII_BCM54XX_EXP_DATA, 0xAB95 },
+ /* increases internal 1V LDO voltage by 5% */
+ { MII_BCM54XX_EXP_SEL, 0x2038 },
+ { MII_BCM54XX_EXP_DATA, 0xBB22 },
+ /* reduce RX low pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x6038 },
+ { MII_BCM54XX_EXP_DATA, 0xFFC5 },
+ /* reduce RX high pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x003a },
+ { MII_BCM54XX_EXP_DATA, 0x2002 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7445_regs_cfg[i].reg,
+ bcm7445_regs_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void phy_write_exp(struct phy_device *phydev,
+ u16 reg, u16 value)
+{
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static void phy_write_misc(struct phy_device *phydev,
+ u16 reg, u16 chl, u16 value)
+{
+ int tmp;
+
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+
+ tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+
+ tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
+
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+{
+ /* write AFE_RXCONFIG_0 */
+ phy_write_misc(phydev, 0x38, 0x0000, 0xeb19);
+
+ /* write AFE_RXCONFIG_1 */
+ phy_write_misc(phydev, 0x38, 0x0001, 0x9a3f);
+
+ /* write AFE_RX_LP_COUNTER */
+ phy_write_misc(phydev, 0x38, 0x0003, 0x7fc7);
+
+ /* write AFE_HPF_TRIM_OTHERS */
+ phy_write_misc(phydev, 0x3A, 0x0000, 0x000b);
+
+ /* write AFE_TX_CONFIG */
+ phy_write_misc(phydev, 0x39, 0x0000, 0x0800);
+
+ /* Increase VCO range to prevent unlocking problem of PLL at low
+ * temp
+ */
+ phy_write_misc(phydev, 0x0032, 0x0001, 0x0048);
+
+ /* Change Ki to 011 */
+ phy_write_misc(phydev, 0x0032, 0x0002, 0x021b);
+
+ /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+ * to 111
+ */
+ phy_write_misc(phydev, 0x0033, 0x0000, 0x0e20);
+
+ /* Adjust bias current trim by -3 */
+ phy_write_misc(phydev, 0x000a, 0x0000, 0x690b);
+
+ /* Switch to CORE_BASE1E */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
+
+ /* Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0000);
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm7445_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return bcm7xxx_28nm_afe_config_init(phydev);
+}
+
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
+{
+ int v, ret;
+
+ v = phy_read(dev, location);
+ if (v < 0)
+ return v;
+
+ v &= ~clr_mask;
+ v |= set_mask;
+
+ ret = phy_write(dev, location, v);
+ if (ret < 0)
+ return ret;
+
+ return v;
+}
+
+static int bcm7xxx_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable 64 clock MDIO */
+ phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
+ phy_read(phydev, MII_BCM7XXX_AUX_MODE);
+
+ /* Workaround only required for 100Mbits/sec */
+ if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR))
+ return 0;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ /* set iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0F00);
+ udelay(10);
+
+ /* reset iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0C00);
+
+ phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
+
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Workaround for putting the PHY in IDDQ mode, required
+ * for all BCM7XXX PHYs
+ */
+static int bcm7xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7xxx_regs {
+ int reg;
+ u16 value;
+ } bcm7xxx_suspend_cfg[] = {
+ { MII_BCM7XXX_TEST, 0x008b },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x01c0 },
+ { MII_BCM7XXX_100TX_DISC, 0x7000 },
+ { MII_BCM7XXX_TEST, 0x000f },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x20d0 },
+ { MII_BCM7XXX_TEST, 0x000b },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7xxx_suspend_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7xxx_suspend_cfg[i].reg,
+ bcm7xxx_suspend_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static struct phy_driver bcm7xxx_driver[] = {
+{
+ .phy_id = PHY_ID_BCM7366,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7366",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7439,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7439",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7445,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7445",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .name = "Broadcom BCM7XXX 28nm",
+ .phy_id = PHY_ID_BCM7XXX_28,
+ .phy_id_mask = PHY_BCM_OUI_MASK,
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_4,
+ .phy_id_mask = 0xffff0000,
+ .name = "Broadcom BCM7XXX 40nm",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_5,
+ .phy_id_mask = 0xffffff00,
+ .name = "Broadcom BCM7XXX 65nm",
+ .features = PHY_BASIC_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_dummy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+} };
+
+static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+ { PHY_ID_BCM7366, 0xfffffff0, },
+ { PHY_ID_BCM7439, 0xfffffff0, },
+ { PHY_ID_BCM7445, 0xfffffff0, },
+ { PHY_ID_BCM7XXX_28, 0xfffffc00 },
+ { PHY_BCM_OUI_4, 0xffff0000 },
+ { PHY_BCM_OUI_5, 0xffffff00 },
+ { }
+};
+
+static int __init bcm7xxx_phy_init(void)
+{
+ return phy_drivers_register(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+static void __exit bcm7xxx_phy_exit(void)
+{
+ phy_drivers_unregister(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+module_init(bcm7xxx_phy_init);
+module_exit(bcm7xxx_phy_exit);
+
+MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
+
+MODULE_DESCRIPTION("Broadcom BCM7xxx internal PHY driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f8c90ea75108..34088d60da74 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -25,58 +25,6 @@
#define BRCM_PHY_REV(phydev) \
((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
-
-#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
-#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
-#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
-
-#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
-#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
-
-#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
-#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
-#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
-#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
-
-#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
-#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
-#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
-#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
-#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
-#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
-#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
-#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
-#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
-#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
-#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
-#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
-#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
-#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
-#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
-#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
-#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
-#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
-
-#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
-#define MII_BCM54XX_SHD_WRITE 0x8000
-#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
-#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
-
-/*
- * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
- */
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
-#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
-
-#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
-
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-
-
/*
* Broadcom LED source encodings. These are used in BCM5461, BCM5481,
* BCM5482, and possibly some others.
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 9414fa272160..98e7cbf720a5 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1006,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev)
} else
list_add_tail(&dp83640->list, &clock->phylist);
- if (clock->chosen && !list_empty(&clock->phylist))
- recalibrate(clock);
- else
- enable_broadcast(dp83640->phydev, clock->page, 1);
-
dp83640_clock_put(clock);
return 0;
@@ -1063,6 +1058,14 @@ static void dp83640_remove(struct phy_device *phydev)
static int dp83640_config_init(struct phy_device *phydev)
{
+ struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_clock *clock = dp83640->clock;
+
+ if (clock->chosen && !list_empty(&clock->phylist))
+ recalibrate(clock);
+ else
+ enable_broadcast(phydev, clock->page, 1);
+
enable_status_frames(phydev, true);
ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
return 0;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 71e49000fbf3..76f54b32a120 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -432,8 +432,28 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(phy_id);
+static ssize_t
+phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%s\n", phy_modes(phydev->interface));
+}
+static DEVICE_ATTR_RO(phy_interface);
+
+static ssize_t
+phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%d\n", phydev->has_fixups);
+}
+static DEVICE_ATTR_RO(phy_has_fixups);
+
static struct attribute *mdio_dev_attrs[] = {
&dev_attr_phy_id.attr,
+ &dev_attr_phy_interface.attr,
+ &dev_attr_phy_has_fixups.attr,
NULL,
};
ATTRIBUTE_GROUPS(mdio_dev);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 19c9eca0ef26..643b5d665f41 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -38,6 +38,26 @@
#include <asm/irq.h>
+static const char *phy_speed_to_str(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
+ case SPEED_100:
+ return "100Mbps";
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported (update phy.c)";
+ }
+}
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -45,12 +65,13 @@
void phy_print_status(struct phy_device *phydev)
{
if (phydev->link) {
- pr_info("%s - Link is Up - %d/%s\n",
- dev_name(&phydev->dev),
- phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ netdev_info(phydev->attached_dev,
+ "Link is Up - %s/%s - flow control %s\n",
+ phy_speed_to_str(phydev->speed),
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
+ phydev->pause ? "rx/tx" : "off");
} else {
- pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ netdev_info(phydev->attached_dev, "Link is Down\n");
}
}
EXPORT_SYMBOL(phy_print_status);
@@ -62,7 +83,7 @@ EXPORT_SYMBOL(phy_print_status);
* If the @phydev driver has an ack_interrupt function, call it to
* ack and clear the phy device's interrupt.
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
@@ -77,7 +98,7 @@ static int phy_clear_interrupt(struct phy_device *phydev)
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
@@ -93,15 +114,16 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
* phy_aneg_done - return auto-negotiation status
* @phydev: target phy_device struct
*
- * Description: Reads the status register and returns 0 either if
- * auto-negotiation is incomplete, or if there was an error.
- * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ * Description: Return the auto-negotiation status of this @phydev.
+ * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
+ * is still pending.
*/
static inline int phy_aneg_done(struct phy_device *phydev)
{
- int retval = phy_read(phydev, MII_BMSR);
+ if (phydev->drv->aneg_done)
+ return phydev->drv->aneg_done(phydev);
- return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+ return genphy_aneg_done(phydev);
}
/* A structure for mapping a particular speed and duplex
@@ -283,7 +305,10 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
- cmd->port = PORT_MII;
+ if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+ cmd->port = PORT_BNC;
+ else
+ cmd->port = PORT_MII;
cmd->phy_address = phydev->addr;
cmd->transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 82514e72b3d8..a70b604ac644 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -139,6 +139,7 @@ static int phy_scan_fixups(struct phy_device *phydev)
mutex_unlock(&phy_fixup_lock);
return err;
}
+ phydev->has_fixups = true;
}
}
mutex_unlock(&phy_fixup_lock);
@@ -534,16 +535,16 @@ static int phy_poll_reset(struct phy_device *phydev)
int phy_init_hw(struct phy_device *phydev)
{
- int ret;
+ int ret = 0;
if (!phydev->drv || !phydev->drv->config_init)
return 0;
- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
- if (ret < 0)
- return ret;
+ if (phydev->drv->soft_reset)
+ ret = phydev->drv->soft_reset(phydev);
+ else
+ ret = genphy_soft_reset(phydev);
- ret = phy_poll_reset(phydev);
if (ret < 0)
return ret;
@@ -865,6 +866,22 @@ int genphy_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_aneg);
+/**
+ * genphy_aneg_done - return auto-negotiation status
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the status register and returns 0 if
+ * auto-negotiation is still incomplete, BMSR_ANEGCOMPLETE if it is
+ * done, or a negative error code if the read fails.
+ */
+int genphy_aneg_done(struct phy_device *phydev)
+{
+ int retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+EXPORT_SYMBOL(genphy_aneg_done);
+
static int gen10g_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -1028,6 +1045,27 @@ static int gen10g_read_status(struct phy_device *phydev)
return 0;
}
+/**
+ * genphy_soft_reset - software reset the PHY via BMCR_RESET bit
+ * @phydev: target phy_device struct
+ *
+ * Description: Perform a software PHY reset using the standard
+ * BMCR_RESET bit and poll for the reset bit to be cleared.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ return phy_poll_reset(phydev);
+}
+EXPORT_SYMBOL(genphy_soft_reset);
+
static int genphy_config_init(struct phy_device *phydev)
{
int val;
@@ -1074,6 +1112,12 @@ static int genphy_config_init(struct phy_device *phydev)
return 0;
}
+static int gen10g_soft_reset(struct phy_device *phydev)
+{
+ /* Do nothing for now */
+ return 0;
+}
+
static int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
@@ -1248,9 +1292,11 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
+ .soft_reset = genphy_soft_reset,
.config_init = genphy_config_init,
.features = 0,
.config_aneg = genphy_config_aneg,
+ .aneg_done = genphy_aneg_done,
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1259,6 +1305,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic 10G PHY",
+ .soft_reset = gen10g_soft_reset,
.config_init = gen10g_config_init,
.features = 0,
.config_aneg = gen10g_config_aneg,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 28407426fd6f..aea92f02401b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1540,16 +1540,10 @@ static int team_init(struct net_device *dev)
mutex_init(&team->lock);
team_set_no_mode(team);
- team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct team_pcpu_stats *team_stats;
- team_stats = per_cpu_ptr(team->pcpu_stats, i);
- u64_stats_init(&team_stats->syncp);
- }
-
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
INIT_HLIST_HEAD(&team->en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
@@ -1648,7 +1642,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 44c4db8450f0..8fe9cb7d0f72 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
* hope the rxq no. may help here.
*/
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 409499fdb157..7e7269fd3707 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -296,7 +296,6 @@ config USB_NET_SR9800
tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
depends on USB_USBNET
select CRC32
- default y
---help---
Say Y if you want to use one of the following 100Mbps USB Ethernet
device based on the CoreChip-sz SR9800 chip.
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9765a7d4766d..5d194093f3e1 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = {
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d6f64dad05bc..955df81a4358 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1118,6 +1118,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
skb_trim(skb, skb->len - 4);
memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
le32_to_cpus(&rx_hdr);
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index e4a8a93fbaf7..1cc24e6f23e2 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u32 size;
u32 count;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
header = (struct gl_header *) skb->data;
// get the packet count of the received skb
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a305a7b2dae6..82d844a8ebd0 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
- if (skb->len == 0) {
- dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len) {
+ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 0a85d9227775..4cbdb1307f3e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
struct nc_trailer *trailer;
u16 hdr_len, packet_len;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
if (!(skb->len & 0x01)) {
netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
skb->len, dev->net->hard_header_len, dev->hard_mtu,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index ff5c87128ffe..313cb6cd4848 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
__be16 proto;
- /* usbnet rx_complete guarantees that skb->len is at least
- * hard_header_len, so we can inspect the dest address without
- * checking skb->len
- */
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
@@ -732,6 +732,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d89dbe395ad2..0654bd3c4591 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -23,7 +23,7 @@
#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
+#define DRIVER_VERSION "v1.05.0 (2014/02/18)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -62,6 +62,8 @@
#define PLA_RSTTELLY 0xe800
#define PLA_CR 0xe813
#define PLA_CRWECR 0xe81c
+#define PLA_CONFIG12 0xe81e /* CONFIG1, CONFIG2 */
+#define PLA_CONFIG34 0xe820 /* CONFIG3, CONFIG4 */
#define PLA_CONFIG5 0xe822
#define PLA_PHY_PWR 0xe84c
#define PLA_OOB_CTRL 0xe84f
@@ -216,7 +218,14 @@
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
+/* PLA_CONFIG34 */
+#define LINK_ON_WAKE_EN 0x0010
+#define LINK_OFF_WAKE_EN 0x0008
+
/* PLA_CONFIG5 */
+#define BWF_EN 0x0040
+#define MWF_EN 0x0020
+#define UWF_EN 0x0010
#define LAN_WAKE_EN 0x0002
/* PLA_LED_FEATURE */
@@ -436,6 +445,8 @@ enum rtl8152_flags {
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
+ SELECTIVE_SUSPEND,
+ PHY_RESET,
};
/* Define these values to match your device */
@@ -514,11 +525,13 @@ struct r8152 {
void (*init)(struct r8152 *);
int (*enable)(struct r8152 *);
void (*disable)(struct r8152 *);
+ void (*up)(struct r8152 *);
void (*down)(struct r8152 *);
void (*unload)(struct r8152 *);
} rtl_ops;
int intr_interval;
+ u32 saved_wolopts;
u32 msg_enable;
u32 tx_qlen;
u16 ocp_base;
@@ -865,11 +878,21 @@ static u16 sram_read(struct r8152 *tp, u16 addr)
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
+ int ret;
if (phy_id != R8152_PHY_ID)
return -EINVAL;
- return r8152_mdio_read(tp, reg);
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = r8152_mdio_read(tp, reg);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
static
@@ -880,7 +903,12 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R8152_PHY_ID)
return;
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
r8152_mdio_write(tp, reg, val);
+
+ usb_autopm_put_interface(tp->intf);
}
static
@@ -889,11 +917,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
+ int ret;
u8 node_id[8] = {0};
- if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
+ if (tp->version == RTL_VER_01)
+ ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id);
+ else
+ ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id);
+
+ if (ret < 0) {
netif_notice(tp, probe, dev, "inet addr fail\n");
- else {
+ } else {
+ if (tp->version != RTL_VER_01) {
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_CONFIG);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES,
+ sizeof(node_id), node_id);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_NORAML);
+ }
+
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
@@ -951,6 +994,8 @@ static void read_bulk_callback(struct urb *urb)
if (!netif_carrier_ok(netdev))
return;
+ usb_mark_last_busy(tp->udev);
+
switch (status) {
case 0:
if (urb->actual_length < ETH_ZLEN)
@@ -1018,6 +1063,8 @@ static void write_bulk_callback(struct urb *urb)
list_add_tail(&agg->list, &tp->tx_free);
spin_unlock_irqrestore(&tp->tx_lock, flags);
+ usb_autopm_put_interface_async(tp->intf);
+
if (!netif_carrier_ok(tp->netdev))
return;
@@ -1028,7 +1075,7 @@ static void write_bulk_callback(struct urb *urb)
return;
if (!skb_queue_empty(&tp->tx_queue))
- tasklet_schedule(&tp->tl);
+ schedule_delayed_work(&tp->schedule, 0);
}
static void intr_callback(struct urb *urb)
@@ -1284,9 +1331,16 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
{
- int remain;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ unsigned long flags;
+ int remain, ret;
u8 *tx_data;
+ __skb_queue_head_init(&skb_head);
+ spin_lock_irqsave(&tx_queue->lock, flags);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock_irqrestore(&tx_queue->lock, flags);
+
tx_data = agg->head;
agg->skb_num = agg->skb_len = 0;
remain = rx_buf_sz;
@@ -1296,14 +1350,14 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
struct sk_buff *skb;
unsigned int len;
- skb = skb_dequeue(&tp->tx_queue);
+ skb = __skb_dequeue(&skb_head);
if (!skb)
break;
remain -= sizeof(*tx_desc);
len = skb->len;
if (remain < len) {
- skb_queue_head(&tp->tx_queue, skb);
+ __skb_queue_head(&skb_head, skb);
break;
}
@@ -1321,28 +1375,50 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
}
- netif_tx_lock(tp->netdev);
+ if (!skb_queue_empty(&skb_head)) {
+ spin_lock_irqsave(&tx_queue->lock, flags);
+ skb_queue_splice(&skb_head, tx_queue);
+ spin_unlock_irqrestore(&tx_queue->lock, flags);
+ }
+
+ netif_tx_lock_bh(tp->netdev);
if (netif_queue_stopped(tp->netdev) &&
skb_queue_len(&tp->tx_queue) < tp->tx_qlen)
netif_wake_queue(tp->netdev);
- netif_tx_unlock(tp->netdev);
+ netif_tx_unlock_bh(tp->netdev);
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out_tx_fill;
usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
agg->head, (int)(tx_data - (u8 *)agg->head),
(usb_complete_t)write_bulk_callback, agg);
- return usb_submit_urb(agg->urb, GFP_ATOMIC);
+ ret = usb_submit_urb(agg->urb, GFP_KERNEL);
+ if (ret < 0)
+ usb_autopm_put_interface(tp->intf);
+
+out_tx_fill:
+ return ret;
}
static void rx_bottom(struct r8152 *tp)
{
unsigned long flags;
- struct list_head *cursor, *next;
+ struct list_head *cursor, *next, rx_queue;
+ if (list_empty(&tp->rx_done))
+ return;
+
+ INIT_LIST_HEAD(&rx_queue);
spin_lock_irqsave(&tp->rx_lock, flags);
- list_for_each_safe(cursor, next, &tp->rx_done) {
+ list_splice_init(&tp->rx_done, &rx_queue);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ list_for_each_safe(cursor, next, &rx_queue) {
struct rx_desc *rx_desc;
struct rx_agg *agg;
int len_used = 0;
@@ -1351,7 +1427,6 @@ static void rx_bottom(struct r8152 *tp)
int ret;
list_del_init(cursor);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
agg = list_entry(cursor, struct rx_agg, list);
urb = agg->urb;
@@ -1389,7 +1464,7 @@ static void rx_bottom(struct r8152 *tp)
memcpy(skb->data, rx_data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += pkt_len;
@@ -1401,13 +1476,13 @@ static void rx_bottom(struct r8152 *tp)
submit:
ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- spin_lock_irqsave(&tp->rx_lock, flags);
if (ret && ret != -ENODEV) {
- list_add_tail(&agg->list, next);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
tasklet_schedule(&tp->tl);
}
}
- spin_unlock_irqrestore(&tp->rx_lock, flags);
}
static void tx_bottom(struct r8152 *tp)
@@ -1465,7 +1540,6 @@ static void bottom_half(unsigned long data)
return;
rx_bottom(tp);
- tx_bottom(tp);
}
static
@@ -1478,6 +1552,27 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
return usb_submit_urb(agg->urb, mem_flags);
}
+static void rtl_drop_queued_tx(struct r8152 *tp)
+{
+ struct net_device_stats *stats = &tp->netdev->stats;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(tx_queue))
+ return;
+
+ __skb_queue_head_init(&skb_head);
+ spin_lock_irqsave(&tx_queue->lock, flags);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock_irqrestore(&tx_queue->lock, flags);
+
+ while ((skb = __skb_dequeue(&skb_head))) {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+}
+
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1554,7 +1649,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
netif_stop_queue(netdev);
if (!list_empty(&tp->tx_free))
- tasklet_schedule(&tp->tl);
+ schedule_delayed_work(&tp->schedule, 0);
return NETDEV_TX_OK;
}
@@ -1613,6 +1708,18 @@ static void rtl_set_eee_plus(struct r8152 *tp)
}
}
+static void rxdy_gated_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ if (enable)
+ ocp_data |= RXDY_GATED_EN;
+ else
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+}
+
static int rtl_enable(struct r8152 *tp)
{
u32 ocp_data;
@@ -1624,9 +1731,7 @@ static int rtl_enable(struct r8152 *tp)
ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
INIT_LIST_HEAD(&tp->rx_done);
ret = 0;
@@ -1681,8 +1786,6 @@ static int rtl8153_enable(struct r8152 *tp)
static void rtl8152_disable(struct r8152 *tp)
{
- struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
- struct sk_buff *skb;
u32 ocp_data;
int i;
@@ -1690,17 +1793,12 @@ static void rtl8152_disable(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- while ((skb = skb_dequeue(&tp->tx_queue))) {
- dev_kfree_skb(skb);
- stats->tx_dropped++;
- }
+ rtl_drop_queued_tx(tp);
for (i = 0; i < RTL8152_MAX_TX; i++)
usb_kill_urb(tp->tx_info[i].urb);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -1721,6 +1819,198 @@ static void rtl8152_disable(struct r8152 *tp)
rtl8152_nic_reset(tp);
}
+static void r8152_power_cut_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ if (enable)
+ ocp_data |= POWER_CUT;
+ else
+ ocp_data &= ~POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RESUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+
+}
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl_get_wol(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u32 wolopts = 0;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (!(ocp_data & LAN_WAKE_EN))
+ return 0;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ if (ocp_data & LINK_ON_WAKE_EN)
+ wolopts |= WAKE_PHY;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (ocp_data & UWF_EN)
+ wolopts |= WAKE_UCAST;
+ if (ocp_data & BWF_EN)
+ wolopts |= WAKE_BCAST;
+ if (ocp_data & MWF_EN)
+ wolopts |= WAKE_MCAST;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ if (ocp_data & MAGIC_EN)
+ wolopts |= WAKE_MAGIC;
+
+ return wolopts;
+}
+
+static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
+{
+ u32 ocp_data;
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_ON_WAKE_EN;
+ if (wolopts & WAKE_PHY)
+ ocp_data |= LINK_ON_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ if (wolopts & WAKE_UCAST)
+ ocp_data |= UWF_EN;
+ if (wolopts & WAKE_BCAST)
+ ocp_data |= BWF_EN;
+ if (wolopts & WAKE_MCAST)
+ ocp_data |= MWF_EN;
+ if (wolopts & WAKE_ANY)
+ ocp_data |= LAN_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data &= ~MAGIC_EN;
+ if (wolopts & WAKE_MAGIC)
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ if (wolopts & WAKE_ANY)
+ device_set_wakeup_enable(&tp->udev->dev, true);
+ else
+ device_set_wakeup_enable(&tp->udev->dev, false);
+}
+
+static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
+{
+ if (enable) {
+ u32 ocp_data;
+
+ __rtl_set_wol(tp, WAKE_ANY);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data |= LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ } else {
+ __rtl_set_wol(tp, tp->saved_wolopts);
+ }
+}
+
+static void rtl_phy_reset(struct r8152 *tp)
+{
+ u16 data;
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+
+ /* don't reset again before the previous reset completes */
+ if (data & BMCR_RESET)
+ return;
+
+ data |= BMCR_RESET;
+ r8152_mdio_write(tp, MII_BMCR, data);
+
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+}
+
+static void rtl_clear_bp(struct r8152 *tp)
+{
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
+ mdelay(3);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
+}
+
+static void r8153_clear_bp(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+ rtl_clear_bp(tp);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8152b_disable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
+ msleep(20);
+}
+
+static inline void r8152b_enable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
+ LINKENA | DIS_SDSAVE);
+}
+
+static void r8152b_hw_phy_cfg(struct r8152 *tp)
+{
+ u16 data;
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8152b_disable_aldps(tp);
+
+ rtl_clear_bp(tp);
+
+ r8152b_enable_aldps(tp);
+ set_bit(PHY_RESET, &tp->flags);
+}
+
static void r8152b_exit_oob(struct r8152 *tp)
{
u32 ocp_data;
@@ -1730,9 +2020,9 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
+ r8153_teredo_off(tp);
+ r8152b_hw_phy_cfg(tp);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@ -1838,10 +2128,6 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
ocp_data |= CPCR_RX_VLAN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
@@ -1854,36 +2140,26 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static void r8152b_disable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
- msleep(20);
-}
-
-static inline void r8152b_enable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
- LINKENA | DIS_SDSAVE);
-}
-
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8153_clear_bp(tp);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -1919,9 +2195,11 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = sram_read(tp, SRAM_10M_AMP2);
data |= AMP_DN;
sram_write(tp, SRAM_10M_AMP2, data);
+
+ set_bit(PHY_RESET, &tp->flags);
}
-static void r8153_u1u2en(struct r8152 *tp, int enable)
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
{
u8 u1u2[8];
@@ -1933,7 +2211,7 @@ static void r8153_u1u2en(struct r8152 *tp, int enable)
usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
}
-static void r8153_u2p3en(struct r8152 *tp, int enable)
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1945,7 +2223,7 @@ static void r8153_u2p3en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
}
-static void r8153_power_cut_en(struct r8152 *tp, int enable)
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1961,28 +2239,12 @@ static void r8153_power_cut_en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
}
-static void r8153_teredo_off(struct r8152 *tp)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
- ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
-}
-
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
int i;
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
-
+ rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -2075,10 +2337,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
ocp_data &= ~TEREDO_WAKE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
@@ -2095,11 +2353,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
@@ -2190,12 +2444,26 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
+ if (test_bit(PHY_RESET, &tp->flags))
+ bmcr |= BMCR_RESET;
+
if (tp->mii.supports_gmii)
r8152_mdio_write(tp, MII_CTRL1000, gbcr);
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
+ if (test_bit(PHY_RESET, &tp->flags)) {
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+ }
+
out:
return ret;
@@ -2203,12 +2471,7 @@ out:
static void rtl8152_down(struct r8152 *tp)
{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
+ r8152_power_cut_en(tp, false);
r8152b_disable_aldps(tp);
r8152b_enter_oob(tp);
r8152b_enable_aldps(tp);
@@ -2216,8 +2479,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
- r8153_u1u2en(tp, 0);
- r8153_power_cut_en(tp, 0);
+ r8153_u1u2en(tp, false);
+ r8153_power_cut_en(tp, false);
r8153_disable_aldps(tp);
r8153_enter_oob(tp);
r8153_enable_aldps(tp);
@@ -2252,6 +2515,9 @@ static void rtl_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
if (!test_bit(WORK_ENABLE, &tp->flags))
goto out1;
@@ -2264,8 +2530,14 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
+ if (tp->speed & LINK_STATUS)
+ tx_bottom(tp);
+
+ if (test_bit(PHY_RESET, &tp->flags))
+ rtl_phy_reset(tp);
+
out1:
- return;
+ usb_autopm_put_interface(tp->intf);
}
static int rtl8152_open(struct net_device *netdev)
@@ -2273,6 +2545,27 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ res = alloc_all_mem(tp);
+ if (res)
+ goto out;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ free_all_mem(tp);
+ goto out;
+ }
+
+ /* The WORK_ENABLE may be set when autoresume occurs */
+ if (test_bit(WORK_ENABLE, &tp->flags)) {
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ }
+
+ tp->rtl_ops.up(tp);
+
rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
@@ -2286,9 +2579,12 @@ static int rtl8152_open(struct net_device *netdev)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
+ free_all_mem(tp);
}
+ usb_autopm_put_interface(tp->intf);
+out:
return res;
}
@@ -2301,33 +2597,30 @@ static int rtl8152_close(struct net_device *netdev)
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
- tasklet_disable(&tp->tl);
- tp->rtl_ops.disable(tp);
- tasklet_enable(&tp->tl);
- return res;
-}
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ rtl_drop_queued_tx(tp);
+ } else {
+ /*
+ * Autosuspend may have been enabled and would not be
+ * disabled when autoresume occurs, because
+ * netif_running() would be false.
+ */
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ }
-static void rtl_clear_bp(struct r8152 *tp)
-{
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
- mdelay(3);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
- ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
-}
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ usb_autopm_put_interface(tp->intf);
+ }
-static void r8153_clear_bp(struct r8152 *tp)
-{
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
- ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
- rtl_clear_bp(tp);
+ free_all_mem(tp);
+
+ return res;
}
static void r8152b_enable_eee(struct r8152 *tp)
@@ -2378,18 +2671,9 @@ static void r8152b_enable_fc(struct r8152 *tp)
r8152_mdio_write(tp, MII_ADVERTISE, anar);
}
-static void r8152b_hw_phy_cfg(struct r8152 *tp)
-{
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
- r8152b_disable_aldps(tp);
-}
-
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
- int i;
-
- rtl_clear_bp(tp);
if (tp->version == RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
@@ -2397,17 +2681,7 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
}
- r8152b_hw_phy_cfg(tp);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-
- r8152b_exit_oob(tp);
+ r8152_power_cut_en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
@@ -2424,14 +2698,6 @@ static void r8152b_init(struct r8152 *tp)
r8152b_enable_aldps(tp);
r8152b_enable_fc(tp);
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
- for (i = 0; i < 100; i++) {
- udelay(100);
- if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
- break;
- }
-
/* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~RX_AGG_DISABLE;
@@ -2443,7 +2709,7 @@ static void r8153_init(struct r8152 *tp)
u32 ocp_data;
int i;
- r8153_u1u2en(tp, 0);
+ r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
@@ -2459,14 +2725,12 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
- r8153_u2p3en(tp, 0);
+ r8153_u2p3en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
ocp_data &= ~TIMER11_EN;
ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
- r8153_clear_bp(tp);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
ocp_data &= ~LED_MODE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
@@ -2484,10 +2748,8 @@ static void r8153_init(struct r8152 *tp)
ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
- r8153_power_cut_en(tp, 0);
- r8153_u1u2en(tp, 1);
-
- r8153_first_init(tp);
+ r8153_power_cut_en(tp, false);
+ r8153_u1u2en(tp, true);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
@@ -2502,26 +2764,30 @@ static void r8153_init(struct r8152 *tp)
r8153_enable_eee(tp);
r8153_enable_aldps(tp);
r8152b_enable_fc(tp);
-
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
- netif_device_detach(tp->netdev);
+ if (PMSG_IS_AUTO(message))
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ else
+ netif_device_detach(tp->netdev);
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
- tasklet_disable(&tp->tl);
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, true);
+ } else {
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ }
}
- tp->rtl_ops.down(tp);
-
return 0;
}
@@ -2529,22 +2795,77 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- tp->rtl_ops.init(tp);
- netif_device_attach(tp->netdev);
+ if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ tp->rtl_ops.init(tp);
+ netif_device_attach(tp->netdev);
+ }
+
if (netif_running(tp->netdev)) {
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ } else {
+ tp->rtl_ops.up(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
+ }
tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
- tasklet_enable(&tp->tl);
}
return 0;
}
+static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl_get_wol(tp);
+
+ usb_autopm_put_interface(tp->intf);
+}
+
+static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out_set_wol;
+
+ __rtl_set_wol(tp, wol->wolopts);
+ tp->saved_wolopts = wol->wolopts & WAKE_ANY;
+
+ usb_autopm_put_interface(tp->intf);
+
+out_set_wol:
+ return ret;
+}
+
+static u32 rtl8152_get_msglevel(struct net_device *dev)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ return tp->msg_enable;
+}
+
+static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ tp->msg_enable = value;
+}
+
static void rtl8152_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -2569,8 +2890,18 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
- return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
static struct ethtool_ops ops = {
@@ -2578,13 +2909,21 @@ static struct ethtool_ops ops = {
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
+ .get_msglevel = rtl8152_get_msglevel,
+ .set_msglevel = rtl8152_set_msglevel,
+ .get_wol = rtl8152_get_wol,
+ .set_wol = rtl8152_set_wol,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
struct r8152 *tp = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(rq);
- int res = 0;
+ int res;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0)
+ goto out;
switch (cmd) {
case SIOCGMIIPHY:
@@ -2607,6 +2946,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
res = -EOPNOTSUPP;
}
+ usb_autopm_put_interface(tp->intf);
+
+out:
return res;
}
@@ -2659,22 +3001,13 @@ static void r8152b_get_version(struct r8152 *tp)
static void rtl8152_unload(struct r8152 *tp)
{
- u32 ocp_data;
-
- if (tp->version != RTL_VER_01) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data |= POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
- }
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+ if (tp->version != RTL_VER_01)
+ r8152_power_cut_en(tp, true);
}
static void rtl8153_unload(struct r8152 *tp)
{
- r8153_power_cut_en(tp, 1);
+ r8153_power_cut_en(tp, true);
}
static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -2689,6 +3022,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8152b_init;
ops->enable = rtl8152_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8152b_exit_oob;
ops->down = rtl8152_down;
ops->unload = rtl8152_unload;
ret = 0;
@@ -2697,6 +3031,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2712,6 +3047,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2775,14 +3111,12 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.phy_id = R8152_PHY_ID;
tp->mii.supports_gmii = 0;
+ intf->needs_remote_wakeup = 1;
+
r8152b_get_version(tp);
tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
- ret = alloc_all_mem(tp);
- if (ret)
- goto out;
-
usb_set_intfdata(intf, tp);
ret = register_netdev(netdev);
@@ -2791,6 +3125,12 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
+ tp->saved_wolopts = __rtl_get_wol(tp);
+ if (tp->saved_wolopts)
+ device_set_wakeup_enable(&udev->dev, true);
+ else
+ device_set_wakeup_enable(&udev->dev, false);
+
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -2812,7 +3152,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
tp->rtl_ops.unload(tp);
- free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -2835,6 +3174,8 @@ static struct usb_driver rtl8152_driver = {
.suspend = rtl8152_suspend,
.resume = rtl8152_resume,
.reset_resume = rtl8152_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
};
module_usb_driver(rtl8152_driver);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a48bc0f20c1a..524a47a28120 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f17b9e02dd34..d9e7892262fa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8dd54a0f7b29..424db65e4396 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 header, align_count;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 4175eb9fdeca..b94a0fbb8b3b 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -63,6 +63,10 @@ static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
int offset = 0;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (offset + sizeof(u32) < skb->len) {
struct sk_buff *sr_skb;
u16 size;
@@ -823,7 +827,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size =
SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
}
- netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__,
+ netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__,
dev->rx_urb_size);
return 0;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 4671da755e7b..dd10d5817d2a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
}
// else network stack removes extra byte if we forced a short packet
- if (skb->len) {
- /* all data was already cloned from skb inside the driver */
- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- dev_kfree_skb_any(skb);
- else
- usbnet_skb_return(dev, skb);
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ goto done;
+
+ if (skb->len < ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+ } else {
+ usbnet_skb_return(dev, skb);
return;
}
- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
done:
skb_queue_tail(&dev->done, skb);
}
@@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb)
switch (urb_status) {
/* success */
case 0:
- if (skb->len < dev->net->hard_header_len) {
- state = rx_cleanup;
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- netif_dbg(dev, rx_err, dev->net,
- "rx length %d\n", skb->len);
- }
break;
/* stalls need manual reset. this is rare ... except that
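The usbnet change above removes the central hard_header_len check from rx_complete(), so minidrivers that parse their own RX framing now guard rx_fixup() themselves, as the rndis_host, smsc75xx, smsc95xx and sr9800 hunks show. A minimal sketch of that per-driver pattern follows; example_rx_fixup() and its 4-byte length-prefix framing are hypothetical, not any real device's format.

static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* This check is no longer done by usbnet core. */
	if (skb->len < dev->net->hard_header_len)
		return 0;		/* 0 tells usbnet to drop the buffer */

	while (skb->len > sizeof(u32)) {
		u32 len = get_unaligned_le32(skb->data);

		skb_pull(skb, sizeof(u32));
		if (len > skb->len)
			return 0;	/* truncated frame, drop it */
		/* a real driver would clone/trim a sub-skb here and pass it
		 * to usbnet_skb_return() before advancing */
		skb_pull(skb, len);
	}
	return 1;			/* non-zero: buffer handled */
}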
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2ec2041b62d4..34b52638e12d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>
@@ -235,18 +236,9 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- int i;
-
- dev->vstats = alloc_percpu(struct pcpu_vstats);
+ dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
if (!dev->vstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_vstats *veth_stats;
- veth_stats = per_cpu_ptr(dev->vstats, i);
- u64_stats_init(&veth_stats->syncp);
- }
-
return 0;
}
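Both veth here and vxlan further down replace the open-coded alloc_percpu() plus per-CPU u64_stats_init() loop with netdev_alloc_pcpu_stats(). Roughly, the helper folds the removed loop into one step; the sketch below mirrors that shape under the assumption that the stats type embeds a u64_stats_sync member named syncp (example_alloc_pcpu_stats is a made-up name and may differ from the kernel's exact macro).

#define example_alloc_pcpu_stats(type)					\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu(type);	\
	if (pcpu_stats) {						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})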
@@ -332,10 +324,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = nla_parse(peer_tb, IFLA_MAX,
- nla_data(nla_peer) + sizeof(struct ifinfomsg),
- nla_len(nla_peer) - sizeof(struct ifinfomsg),
- ifla_policy);
+ err = rtnl_nla_parse_ifla(peer_tb,
+ nla_data(nla_peer) + sizeof(struct ifinfomsg),
+ nla_len(nla_peer) - sizeof(struct ifinfomsg));
if (err < 0)
return err;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3be786faaaec..9275c8c423b1 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2729,47 +2729,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
/*
* Enable MSIx vectors.
* Returns :
- * 0 on successful enabling of required vectors,
* VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
- * could be enabled.
- * number of vectors which can be enabled otherwise (this number is smaller
+ * were enabled.
+ * number of vectors which were enabled otherwise (this number is greater
* than VMXNET3_LINUX_MIN_MSIX_VECT)
*/
static int
-vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
- int vectors)
-{
- int err = 0, vector_threshold;
- vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
-
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
- vectors);
- if (!err) {
- adapter->intr.num_intrs = vectors;
- return 0;
- } else if (err < 0) {
- dev_err(&adapter->netdev->dev,
- "Failed to enable MSI-X, error: %d\n", err);
- vectors = 0;
- } else if (err < vector_threshold) {
- break;
- } else {
- /* If fails to enable required number of MSI-x vectors
- * try enabling minimum number of vectors required.
- */
- dev_err(&adapter->netdev->dev,
- "Failed to enable %d MSI-X, trying %d instead\n",
- vectors, vector_threshold);
- vectors = vector_threshold;
- }
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
+{
+ int ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries, nvec, nvec);
+
+ if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable %d MSI-X, trying %d\n",
+ nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries,
+ VMXNET3_LINUX_MIN_MSIX_VECT,
+ VMXNET3_LINUX_MIN_MSIX_VECT);
}
- dev_info(&adapter->pdev->dev,
- "Number of MSI-X interrupts which can be allocated "
- "is lower than min threshold required.\n");
- return err;
+ if (ret < 0) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable MSI-X, error: %d\n", ret);
+ }
+
+ return ret;
}
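The rewrite above leans on the pci_enable_msix_range(pdev, entries, minvec, maxvec) contract: it returns the number of vectors actually allocated (within [minvec, maxvec]) or a negative errno, with -ENOSPC indicating that fewer than minvec were available. Below is a hedged usage sketch, not vmxnet3 code; example_setup_msix() is a hypothetical helper.

static int example_setup_msix(struct pci_dev *pdev,
			      struct msix_entry *entries,
			      int wanted, int minimum)
{
	int i, got;

	for (i = 0; i < wanted; i++)
		entries[i].entry = i;

	/* Ask for exactly "wanted" vectors, then fall back to the minimum. */
	got = pci_enable_msix_range(pdev, entries, wanted, wanted);
	if (got == -ENOSPC && wanted > minimum)
		got = pci_enable_msix_range(pdev, entries, minimum, minimum);

	return got;	/* >= minimum on success, negative errno on failure */
}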
@@ -2796,56 +2784,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int vector, err = 0;
-
- adapter->intr.num_intrs = (adapter->share_intr ==
- VMXNET3_INTR_TXSHARE) ? 1 :
- adapter->num_tx_queues;
- adapter->intr.num_intrs += (adapter->share_intr ==
- VMXNET3_INTR_BUDDYSHARE) ? 0 :
- adapter->num_rx_queues;
- adapter->intr.num_intrs += 1; /* for link event */
-
- adapter->intr.num_intrs = (adapter->intr.num_intrs >
- VMXNET3_LINUX_MIN_MSIX_VECT
- ? adapter->intr.num_intrs :
- VMXNET3_LINUX_MIN_MSIX_VECT);
-
- for (vector = 0; vector < adapter->intr.num_intrs; vector++)
- adapter->intr.msix_entries[vector].entry = vector;
-
- err = vmxnet3_acquire_msix_vectors(adapter,
- adapter->intr.num_intrs);
+ int i, nvec;
+
+ nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
+ 0 : adapter->num_rx_queues;
+ nvec += 1; /* for link event */
+ nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
+ nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
+
+ for (i = 0; i < nvec; i++)
+ adapter->intr.msix_entries[i].entry = i;
+
+ nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
+ if (nvec < 0)
+ goto msix_err;
+
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
- if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
netdev_err(adapter->netdev,
"Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
- adapter->intr.num_intrs =
- VMXNET3_LINUX_MIN_MSIX_VECT;
}
- return;
}
- if (!err)
- return;
+ adapter->intr.num_intrs = nvec;
+ return;
+
+msix_err:
/* If we cannot allocate MSIx vectors use only one rx queue */
dev_info(&adapter->pdev->dev,
"Failed to enable MSI-X, error %d. "
- "Limiting #rx queues to 1, try MSI.\n", err);
+ "Limiting #rx queues to 1, try MSI.\n", nvec);
adapter->intr.type = VMXNET3_IT_MSI;
}
if (adapter->intr.type == VMXNET3_IT_MSI) {
- int err;
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
+ if (!pci_enable_msi(adapter->pdev)) {
adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b0f705c2378f..dec9820bc182 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1978,19 +1978,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs;
- int i;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *vxlan_stats;
- vxlan_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&vxlan_stats->syncp);
- }
-
-
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
if (vs) {
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index b59cfbe0276b..6260b834a86f 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -161,6 +161,8 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
bool bt_ant_diversity;
+
+ int last_rssi;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 74f45fa6f428..27f20e0510f7 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -204,7 +204,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
break;
/* 80MHZ */
case 2:
- status->flag |= RX_FLAG_80MHZ;
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
@@ -266,7 +266,7 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->flag & RX_FLAG_80MHZ ? "80" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index d6bc7cb61bfb..1a2973b7acf2 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
if (ah->ah_version == AR5K_AR5210) {
- srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
+ srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
} else {
srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index fd4c89df67e1..c2c6f4604958 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -790,7 +790,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
if (nw_type & ADHOC_NETWORK) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
cfg80211_put_bss(ar->wiphy, bss);
return;
}
@@ -861,13 +861,9 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
}
if (vif->nw_type & ADHOC_NETWORK) {
- if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
- return;
- }
- memset(bssid, 0, ETH_ALEN);
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
@@ -3256,6 +3252,15 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
int ret, rssi_thold;
+ int n_match_sets = request->n_match_sets;
+
+ /*
+ * If there's a matchset w/o an SSID, then assume it's just for
+ * the RSSI (nothing else is currently supported) and ignore it.
+ * The device only supports a global RSSI filter that we set below.
+ */
+ if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
+ n_match_sets = 0;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@@ -3268,11 +3273,11 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
request->n_ssids,
request->match_sets,
- request->n_match_sets);
+ n_match_sets);
if (ret < 0)
return ret;
- if (!request->n_match_sets) {
+ if (!n_match_sets) {
ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
ALL_BSS_FILTER, 0);
if (ret < 0)
@@ -3286,12 +3291,12 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
ar->fw_capabilities)) {
- if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+ if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
rssi_thold = 0;
- else if (request->rssi_thold < -127)
+ else if (request->min_rssi_thold < -127)
rssi_thold = -127;
else
- rssi_thold = request->rssi_thold;
+ rssi_thold = request->min_rssi_thold;
ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
rssi_thold);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7b96b3e5712d..8fcc029a76a6 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -120,18 +120,6 @@ config ATH9K_WOW
This option enables Wake on Wireless LAN support for certain cards.
Currently, AR9462 is supported.
-config ATH9K_LEGACY_RATE_CONTROL
- bool "Atheros ath9k rate control"
- depends on ATH9K
- default n
- ---help---
- Say Y, if you want to use the ath9k specific rate control
- module instead of minstrel_ht. Be warned that there are various
- issues with the ath9k RC and minstrel is a more robust algorithm.
- Note that even if this option is selected, "ath9k_rate_control"
- has to be passed to mac80211 using the module parameter,
- ieee80211_default_rc_algo.
-
config ATH9K_RFKILL
bool "Atheros ath9k rfkill support" if EXPERT
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index a40e5c5d7418..747975e1860a 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,6 @@ ath9k-y += beacon.o \
antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
-ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index a352128c40ad..ac8301ef5242 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -23,10 +23,11 @@
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
#define MAX_PHS_DELTA 10
+#define MAXIQCAL 3
struct coeff {
- int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
- int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
+ int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
+ int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
int iqc_coeff[2];
};
@@ -655,9 +656,6 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (i2_m_q2_a0_d1 > 0x800)
i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
- if (i2_p_q2_a0_d1 > 0x1000)
- i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
-
if (iq_corr_a0_d1 > 0x800)
iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@@ -800,7 +798,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
@@ -831,7 +829,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
@@ -839,7 +837,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
return true;
}
-static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
+static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
+ int nmeasurement,
int max_delta)
{
int mp_max = -64, max_idx = 0;
@@ -848,20 +847,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
/* find min/max mismatch across all calibrated gains */
for (i = 0; i < nmeasurement; i++) {
- if (mp_coeff[i] > mp_max) {
- mp_max = mp_coeff[i];
+ if (mp_coeff[i][0] > mp_max) {
+ mp_max = mp_coeff[i][0];
max_idx = i;
- } else if (mp_coeff[i] < mp_min) {
- mp_min = mp_coeff[i];
+ } else if (mp_coeff[i][0] < mp_min) {
+ mp_min = mp_coeff[i][0];
min_idx = i;
}
}
/* find average (exclude max abs value) */
for (i = 0; i < nmeasurement; i++) {
- if ((abs(mp_coeff[i]) < abs(mp_max)) ||
- (abs(mp_coeff[i]) < abs(mp_min))) {
- mp_avg += mp_coeff[i];
+ if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
+ (abs(mp_coeff[i][0]) < abs(mp_min))) {
+ mp_avg += mp_coeff[i][0];
mp_count++;
}
}
@@ -873,7 +872,7 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
if (mp_count)
mp_avg /= mp_count;
else
- mp_avg = mp_coeff[nmeasurement - 1];
+ mp_avg = mp_coeff[nmeasurement - 1][0];
/* detect outlier */
if (abs(mp_max - mp_min) > max_delta) {
@@ -882,15 +881,16 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
else
outlier_idx = min_idx;
- mp_coeff[outlier_idx] = mp_avg;
+ mp_coeff[outlier_idx][0] = mp_avg;
}
}
-static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
- struct coeff *coeff,
- bool is_reusable)
+static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
+ struct coeff *coeff,
+ bool is_reusable)
{
int i, im, nmeasurement;
+ int magnitude, phase;
u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
struct ath9k_hw_cal_data *caldata = ah->caldata;
@@ -920,21 +920,30 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
if (nmeasurement > MAX_MEASUREMENT)
nmeasurement = MAX_MEASUREMENT;
- /* detect outlier only if nmeasurement > 1 */
- if (nmeasurement > 1) {
- /* Detect magnitude outlier */
- ar9003_hw_detect_outlier(coeff->mag_coeff[i],
- nmeasurement, MAX_MAG_DELTA);
-
- /* Detect phase outlier */
- ar9003_hw_detect_outlier(coeff->phs_coeff[i],
- nmeasurement, MAX_PHS_DELTA);
+ /*
+ * Skip normal outlier detection for AR9550.
+ */
+ if (!AR_SREV_9550(ah)) {
+ /* detect outlier only if nmeasurement > 1 */
+ if (nmeasurement > 1) {
+ /* Detect magnitude outlier */
+ ar9003_hw_detect_outlier(coeff->mag_coeff[i],
+ nmeasurement,
+ MAX_MAG_DELTA);
+
+ /* Detect phase outlier */
+ ar9003_hw_detect_outlier(coeff->phs_coeff[i],
+ nmeasurement,
+ MAX_PHS_DELTA);
+ }
}
for (im = 0; im < nmeasurement; im++) {
+ magnitude = coeff->mag_coeff[i][im][0];
+ phase = coeff->phs_coeff[i][im][0];
- coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) |
- ((coeff->phs_coeff[i][im] & 0x7f) << 7);
+ coeff->iqc_coeff[0] =
+ (phase & 0x7f) | ((magnitude & 0x7f) << 7);
if ((im % 2) == 0)
REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
@@ -991,7 +1000,63 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
return true;
}
-static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
+ struct coeff *coeff,
+ int i, int nmeasurement)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int im, ix, iy, temp;
+
+ for (im = 0; im < nmeasurement; im++) {
+ for (ix = 0; ix < MAXIQCAL - 1; ix++) {
+ for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
+ if (coeff->mag_coeff[i][im][iy] <
+ coeff->mag_coeff[i][im][ix]) {
+ temp = coeff->mag_coeff[i][im][ix];
+ coeff->mag_coeff[i][im][ix] =
+ coeff->mag_coeff[i][im][iy];
+ coeff->mag_coeff[i][im][iy] = temp;
+ }
+ if (coeff->phs_coeff[i][im][iy] <
+ coeff->phs_coeff[i][im][ix]) {
+ temp = coeff->phs_coeff[i][im][ix];
+ coeff->phs_coeff[i][im][ix] =
+ coeff->phs_coeff[i][im][iy];
+ coeff->phs_coeff[i][im][iy] = temp;
+ }
+ }
+ }
+ coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
+ coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
+
+ ath_dbg(common, CALIBRATE,
+ "IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
+ i, im,
+ coeff->mag_coeff[i][im][0],
+ coeff->phs_coeff[i][im][0]);
+ }
+}
+
+static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
+ struct coeff *coeff,
+ int iqcal_idx,
+ int nmeasurement)
+{
+ int i;
+
+ if ((iqcal_idx + 1) != MAXIQCAL)
+ return false;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ __ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
+ }
+
+ return true;
+}
+
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
+ int iqcal_idx,
+ bool is_reusable)
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@@ -1004,10 +1069,11 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
AR_PHY_CHAN_INFO_TAB_1,
AR_PHY_CHAN_INFO_TAB_2,
};
- struct coeff coeff;
+ static struct coeff coeff;
s32 iq_res[6];
int i, im, j;
- int nmeasurement;
+ int nmeasurement = 0;
+ bool outlier_detect = true;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
@@ -1065,17 +1131,23 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
goto tx_iqcal_fail;
}
- coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f;
- coeff.phs_coeff[i][im] =
+ coeff.phs_coeff[i][im][iqcal_idx] =
+ coeff.iqc_coeff[0] & 0x7f;
+ coeff.mag_coeff[i][im][iqcal_idx] =
(coeff.iqc_coeff[0] >> 7) & 0x7f;
- if (coeff.mag_coeff[i][im] > 63)
- coeff.mag_coeff[i][im] -= 128;
- if (coeff.phs_coeff[i][im] > 63)
- coeff.phs_coeff[i][im] -= 128;
+ if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
+ coeff.mag_coeff[i][im][iqcal_idx] -= 128;
+ if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
+ coeff.phs_coeff[i][im][iqcal_idx] -= 128;
}
}
- ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
+
+ if (AR_SREV_9550(ah))
+ outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
+ iqcal_idx, nmeasurement);
+ if (outlier_detect)
+ ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
return;
@@ -1409,7 +1481,7 @@ skip_tx_iqcal:
}
if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
@@ -1455,14 +1527,38 @@ skip_tx_iqcal:
return true;
}
+static bool do_ar9003_agc_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool status;
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms,"
+ "noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ return true;
+}
+
static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
- bool is_reusable = true, status = true;
+ bool status = true;
bool run_agc_cal = false, sep_iq_cal = false;
+ int i = 0;
/* Use chip chainmask only for calibration */
ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
@@ -1485,7 +1581,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
* AGC calibration. Specifically, AR9550 in SoC chips.
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- txiqcal_done = true;
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
+ txiqcal_done = true;
+ } else {
+ txiqcal_done = false;
+ }
run_agc_cal = true;
} else {
sep_iq_cal = true;
@@ -1512,27 +1613,37 @@ skip_tx_iqcal:
if (AR_SREV_9330_11(ah))
ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT);
- }
+ /*
+ * For non-AR9550 chips, we just trigger AGC calibration
+ * in the HW, poll for completion and then process
+ * the results.
+ *
+ * For AR955x, we run it multiple times and use
+ * median IQ correction.
+ */
+ if (!AR_SREV_9550(ah)) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
- if (!status) {
- ath_dbg(common, CALIBRATE,
- "offset calibration failed to complete in %d ms; noisy environment?\n",
- AH_WAIT_TIMEOUT / 1000);
- return false;
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
+ } else {
+ if (!txiqcal_done) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ } else {
+ for (i = 0; i < MAXIQCAL; i++) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
+ }
+ }
+ }
}
- if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
-
/* Revert chainmask to runtime parameters */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
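On AR955x the calibration path above now runs the Tx IQ cal MAXIQCAL (3) times and, per (chain, gain) pair, sorts the samples and promotes the middle one to slot 0 before the usual load/outlier step. The sketch below shows the median-of-odd-n idea in isolation; example_median_of_samples() is a hypothetical helper, not driver code.

static int example_median_of_samples(int samples[], int n)
{
	int i, j, tmp;

	/* ascending selection sort, mirroring __ar955x_tx_iq_cal_sort() */
	for (i = 0; i < n - 1; i++)
		for (j = i + 1; j < n; j++)
			if (samples[j] < samples[i]) {
				tmp = samples[i];
				samples[i] = samples[j];
				samples[j] = tmp;
			}

	return samples[n / 2];	/* the median for odd n, e.g. n == MAXIQCAL */
}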
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b5ac32cfbeb8..21d13bc99c5a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -30,7 +30,6 @@
#include "spectral.h"
struct ath_node;
-struct ath_rate_table;
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
@@ -150,6 +149,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
+enum {
+ WLAN_RC_PHY_OFDM,
+ WLAN_RC_PHY_CCK,
+};
+
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@@ -442,7 +446,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
-bool ath9k_csa_is_finished(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_csa_update(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@@ -757,7 +762,6 @@ struct ath_softc {
#endif
struct ath9k_hw_cal_data caldata;
- int last_rssi;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
@@ -774,7 +778,6 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
- struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 2e8bba0eb361..32d00e8cfd0c 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -292,11 +292,8 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
-bool ath9k_csa_is_finished(struct ath_softc *sc)
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif;
-
- vif = sc->csa_vif;
if (!vif || !vif->csa_active)
return false;
@@ -304,11 +301,23 @@ bool ath9k_csa_is_finished(struct ath_softc *sc)
return false;
ieee80211_csa_finish(vif);
-
- sc->csa_vif = NULL;
return true;
}
+static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = data;
+ ath9k_csa_is_finished(sc, vif);
+}
+
+void ath9k_csa_update(struct ath_softc *sc)
+{
+ ieee80211_iterate_active_interfaces(sc->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath9k_csa_update_vif,
+ sc);
+}
+
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@@ -362,13 +371,13 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
- /* EDMA devices check that in the tx completion function. */
- if (!edma && ath9k_csa_is_finished(sc))
- return;
-
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
+ /* EDMA devices check that in the tx completion function. */
+ if (!edma && ath9k_csa_is_finished(sc, vif))
+ return;
+
if (!vif || !vif->bss_conf.enable_beacon)
return;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 768c733cad31..c6dd7f1fed65 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,6 +27,250 @@ MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
+/* Assumes you've already done the endian to CPU conversion */
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter)
+{
+ struct ath_hw *ah = common->ah;
+ bool is_mc, is_valid_tkip, strip_mic, mic_error;
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+ is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap);
+ strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+ ieee80211_has_protected(fc) &&
+ !(rx_stats->rs_status &
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS));
+
+ /*
+ * Key miss events are only relevant for pairwise keys where the
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+ mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
+ !ieee80211_has_morefrags(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+ (rx_stats->rs_status & ATH9K_RXERR_MIC);
+
+ /*
+ * The rx_stats->rs_status will not be set until the end of the
+ * chained descriptors so it can be ignored if rs_more is set. The
+ * rs_more will be false at the last element of the chained
+ * descriptors.
+ */
+ if (rx_stats->rs_status != 0) {
+ u8 status_mask;
+
+ if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
+ rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+ mic_error = false;
+ }
+
+ if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
+ (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
+ *decrypt_error = true;
+ mic_error = false;
+ }
+
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS;
+
+ if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
+ status_mask |= ATH9K_RXERR_CRC;
+
+ if (rx_stats->rs_status & ~status_mask)
+ return false;
+ }
+
+ /*
+ * For unicast frames the MIC error bit can have false positives,
+ * so all MIC error reports need to be validated in software.
+ * False negatives are not common, so skip software verification
+ * if the hardware considers the MIC valid.
+ */
+ if (strip_mic)
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ else if (is_mc && mic_error)
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_accept);
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ieee80211_hdrlen(fc);
+
+ /* The MAC header is padded to have 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = padpos & 3;
+ if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ keyix = rx_stats->rs_keyix;
+
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc)
+ && !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
+
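+/* Worked example of the padding math in ath9k_cmn_rx_skb_postprocess()
+ * above (the header lengths are the usual 802.11 values; the arithmetic
+ * is illustrative, not additional driver logic):
+ *
+ *	24-byte data header: padpos & 3 == 0 -> nothing stripped
+ *	26-byte QoS header:  padpos & 3 == 2 -> two pad bytes removed,
+ *	                     matching (4 - padpos % 4) % 4 for even lengths
+ */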
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+ struct ath_hw *ah = common->ah;
+
+ band = ah->curchan->chan->band;
+ sband = hw->wiphy->bands[band];
+
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_5MHZ;
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_10MHZ;
+
+ if (rx_stats->rs_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ rxs->flag |= rx_stats->flag;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
+ }
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->rate_idx = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rate);
+
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ath_hw *ah = common->ah;
+ int last_rssi;
+ int rssi = rx_stats->rs_rssi;
+ int i, j;
+
+ /*
+ * RSSI is not available for subframes in an A-MPDU.
+ */
+ if (rx_stats->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ /*
+ * Check if the RSSI for the last subframe in an A-MPDU
+ * or an unaggregated frame is valid.
+ */
+ if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+
+ if (!(ah->rxchainmask & BIT(i)))
+ continue;
+
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ }
+ j++;
+ }
+
+ /*
+ * Update the beacon RSSI; this is used by ANI.
+ */
+ if (rx_stats->is_mybeacon &&
+ ((ah->opmode == NL80211_IFTYPE_STATION) ||
+ (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+ ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
+ last_rssi = common->last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
+
+ ah->stats.avgbrssi = rssi;
+ }
+
+ rxs->signal = ah->noise + rx_stats->rs_rssi;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rssi);
+
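+/* Worked example for the signal fields filled in by
+ * ath9k_cmn_process_rssi() above (the numbers are illustrative): the
+ * hardware reports RSSI in dB above the noise floor, so with
+ * ah->noise == -95 dBm and rs_rssi == 35 the value handed to mac80211 is
+ *
+ *	rxs->signal = -95 + 35 = -60 dBm
+ *
+ * and each rxs->chain_signal[] entry is likewise noise + rs_rssi_ctl[chain].
+ */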
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index eb85e1bdca88..38b5609a4018 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -42,6 +42,25 @@
#define ATH_EP_RND(x, mul) \
(((x) + ((mul)/2)) / (mul))
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter);
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error);
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index cc7a025d833e..559a68c2709c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -18,7 +18,6 @@
#define DEBUG_H
#include "hw.h"
-#include "rc.h"
#include "dfs_debug.h"
struct ath_txq;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 0a7ddf4c88c9..7936c9126a20 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -21,6 +21,8 @@
#include "hw.h"
+struct ath_softc;
+
/**
* struct ath_dfs_stats - DFS Statistics per wiphy
* @pulses_total: pulses reported by HW
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6d5d716adc1b..8e7153b186ed 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -54,6 +54,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
.driver_info = AR9280_USB }, /* SMC Networks */
{ USB_DEVICE(0x0411, 0x017f),
.driver_info = AR9280_USB }, /* Sony UWA-BR100 */
+ { USB_DEVICE(0x0411, 0x0197),
+ .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
{ USB_DEVICE(0x04da, 0x3904),
.driver_info = AR9280_USB },
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 99a203174f45..ba83f582bf4a 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -277,7 +277,6 @@ struct ath9k_htc_rxbuf {
};
struct ath9k_htc_rx {
- int last_rssi; /* FIXME: per-STA */
struct list_head rxbuf;
spinlock_t rxbuflock;
};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c57d6b859c04..8d0b9bcb47b4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -611,6 +611,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c9254a61ca52..90dad4172b0a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1474,6 +1474,7 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
common->curaid = bss_conf->aid;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 12e0f32a4905..47b2bfcd8223 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -927,43 +927,39 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
- priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
-static void ath9k_process_rate(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *rxs,
- u8 rx_rate, u8 rs_flags)
+static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
-
- if (rx_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- if (rs_flags & ATH9K_RX_2040)
- rxs->flag |= RX_FLAG_40MHZ;
- if (rs_flags & ATH9K_RX_GI)
- rxs->flag |= RX_FLAG_SHORT_GI;
- rxs->rate_idx = rx_rate & 0x7f;
- return;
- }
-
- band = hw->conf.chandef.chan->band;
- sband = hw->wiphy->bands[band];
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_rate) {
- rxs->rate_idx = i;
- return;
- }
- if (sband->bitrates[i].hw_value_short == rx_rate) {
- rxs->rate_idx = i;
- rxs->flag |= RX_FLAG_SHORTPRE;
- return;
- }
- }
+ rx_stats->flag = 0;
+ if (rxstatus->rs_flags & ATH9K_RX_2040)
+ rx_stats->flag |= RX_FLAG_40MHZ;
+ if (rxstatus->rs_flags & ATH9K_RX_GI)
+ rx_stats->flag |= RX_FLAG_SHORT_GI;
+}
+static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
+{
+ rx_stats->rs_datalen = rxstatus->rs_datalen;
+ rx_stats->rs_status = rxstatus->rs_status;
+ rx_stats->rs_phyerr = rxstatus->rs_phyerr;
+ rx_stats->rs_rssi = rxstatus->rs_rssi;
+ rx_stats->rs_keyix = rxstatus->rs_keyix;
+ rx_stats->rs_rate = rxstatus->rs_rate;
+ rx_stats->rs_antenna = rxstatus->rs_antenna;
+ rx_stats->rs_more = rxstatus->rs_more;
+
+ memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
+ sizeof(rx_stats->rs_rssi_ctl));
+ memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
+ sizeof(rx_stats->rs_rssi_ext));
+
+ rx_stats->rs_isaggr = rxstatus->rs_isaggr;
+ rx_stats->rs_moreaggr = rxstatus->rs_moreaggr;
+ rx_stats->rs_num_delims = rxstatus->rs_num_delims;
+ convert_htc_flag(rx_stats, rxstatus);
}
static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
@@ -975,10 +971,10 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb = rxbuf->skb;
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_hw *ah = common->ah;
struct ath_htc_rx_status *rxstatus;
- int hdrlen, padsize;
- int last_rssi = ATH_RSSI_DUMMY_MARKER;
- __le16 fc;
+ struct ath_rx_status rx_stats;
+ bool decrypt_error;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -999,103 +995,39 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
ath9k_htc_err_stat_rx(priv, rxstatus);
/* Get the RX status information */
- memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
- skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-
- padsize = hdrlen & 3;
- if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
- if (rxbuf->rxstatus.rs_status != 0) {
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
- goto rx_next;
-
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
- /* FIXME */
- } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
- if (ieee80211_is_ctl(fc))
- /*
- * Sometimes, we get invalid
- * MIC failures on valid control frames.
- * Remove these mic errors.
- */
- rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
- else
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_CRC))
- goto rx_next;
- } else {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
- goto rx_next;
- }
- }
- }
-
- if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
- u8 keyix;
- keyix = rxbuf->rxstatus.rs_keyix;
- if (keyix != ATH9K_RXKEYIX_INVALID) {
- rx_status->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc) &&
- skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
- if (test_bit(keyix, common->keymap))
- rx_status->flag |= RX_FLAG_DECRYPTED;
- }
- }
-
- ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
- rxbuf->rxstatus.rs_flags);
-
- if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
- !rxbuf->rxstatus.rs_moreaggr)
- ATH_RSSI_LPF(priv->rx.last_rssi,
- rxbuf->rxstatus.rs_rssi);
-
- last_rssi = priv->rx.last_rssi;
+ /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
+ * After this, we can drop this part of skb. */
+ rx_status_htc_to_ath(&rx_stats, rxstatus);
+ rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
- if (ath_is_mybeacon(common, hdr)) {
- s8 rssi = rxbuf->rxstatus.rs_rssi;
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two rate lookups per frame.
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
+ &decrypt_error, priv->rxfilter))
+ goto rx_next;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
+ rx_status, decrypt_error);
- if (rssi < 0)
- rssi = 0;
+ if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
+ goto rx_next;
- priv->ah->stats.avgbrssi = rssi;
- }
+ rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
+ ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
- rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
- rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
+ rx_status->antenna = rx_stats.rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
return true;
-
rx_next:
return false;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index aac4a406a513..a0ff5b637054 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -358,6 +358,36 @@ ret:
kfree_skb(skb);
}
+static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+ struct sk_buff *skb)
+{
+ uint32_t *pattern = (uint32_t *)skb->data;
+
+ switch (*pattern) {
+ case 0x33221199:
+ {
+ struct htc_panic_bad_vaddr *htc_panic;
+ htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
+ htc_panic->exccause, htc_panic->pc,
+ htc_panic->badvaddr);
+ break;
+ }
+ case 0x33221299:
+ {
+ struct htc_panic_bad_epid *htc_panic;
+ htc_panic = (struct htc_panic_bad_epid *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "bad epid: 0x%08x\n", htc_panic->epid);
+ break;
+ }
+ default:
+ dev_err(htc_handle->dev, "ath: uknown panic pattern!\n");
+ break;
+ }
+}
+
/*
* HTC Messages are handled directly here and the obtained SKB
* is freed.
@@ -379,6 +409,12 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
+ if (epid == 0x99) {
+ ath9k_htc_fw_panic_report(htc_handle, skb);
+ kfree_skb(skb);
+ return;
+ }
+
if (epid >= ENDPOINT_MAX) {
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index e1ffbb6bd636..06474ccc7696 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -77,6 +77,18 @@ struct htc_config_pipe_msg {
u8 credits;
} __packed;
+struct htc_panic_bad_vaddr {
+ __be32 pattern;
+ __be32 exccause;
+ __be32 pc;
+ __be32 badvaddr;
+} __packed;
+
+struct htc_panic_bad_epid {
+ __be32 pattern;
+ __be32 epid;
+} __packed;
+
struct htc_ep_callbacks {
void *priv;
void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 11eab9f01fd8..5db01b4212c8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,7 +23,6 @@
#include "hw.h"
#include "hw-ops.h"
-#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"
#include "ar9003_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 1fc2e5a26b52..07a0315dd2f6 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -538,7 +538,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->config.txpowlimit = ATH_TXPOWER_MAX;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -1106,19 +1106,11 @@ static int __init ath9k_init(void)
{
int error;
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- pr_err("Unable to register rate control algorithm: %d\n",
- error);
- goto err_out;
- }
-
error = ath_pci_init();
if (error < 0) {
pr_err("No PCI devices found, driver not installed\n");
error = -ENODEV;
- goto err_rate_unregister;
+ goto err_out;
}
error = ath_ahb_init();
@@ -1131,9 +1123,6 @@ static int __init ath9k_init(void)
err_pci_exit:
ath_pci_exit();
-
- err_rate_unregister:
- ath_rate_control_unregister();
err_out:
return error;
}
@@ -1144,7 +1133,6 @@ static void __exit ath9k_exit(void)
is_ath9k_unloaded = true;
ath_ahb_exit();
ath_pci_exit();
- ath_rate_control_unregister();
pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 10271373a0cd..89df634e81f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -155,12 +155,8 @@ struct ath_htc_rx_status {
u8 rs_status;
u8 rs_phyerr;
int8_t rs_rssi;
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
u8 rs_keyix;
u8 rs_rate;
u8 rs_antenna;
@@ -170,6 +166,7 @@ struct ath_htc_rx_status {
u8 rs_num_delims;
u8 rs_flags;
u8 rs_dummy;
+ /* FIXME: evm* never used? */
__be32 evm0;
__be32 evm1;
__be32 evm2;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5924f72dd493..afce549a097b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1178,9 +1178,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
- if (sc->csa_vif == vif)
- sc->csa_vif = NULL;
-
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
@@ -1609,7 +1606,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -1866,7 +1863,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
static bool ath9k_has_tx_pending(struct ath_softc *sc)
{
- int i, npend;
+ int i, npend = 0;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@@ -2086,13 +2083,8 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
- struct ath_softc *sc = hw->priv;
-
- /* mac80211 does not support CSA in multi-if cases (yet) */
- if (WARN_ON(sc->csa_vif))
- return;
-
- sc->csa_vif = vif;
+ /* depend on vif->csa_active only */
+ return;
}
struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
deleted file mode 100644
index d829bb62a3fc..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2004-2011 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "ath9k.h"
-
-static const struct ath_rate_table ar5416_11na_ratetable = {
- 68,
- 8, /* MCS start */
- {
- [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
- 5400, 0, 12 }, /* 6 Mb */
- [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
- 7800, 1, 18 }, /* 9 Mb */
- [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10000, 2, 24 }, /* 12 Mb */
- [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 13900, 3, 36 }, /* 18 Mb */
- [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17300, 4, 48 }, /* 24 Mb */
- [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23000, 5, 72 }, /* 36 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 6, 96 }, /* 48 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 29300, 7, 108 }, /* 54 Mb */
- [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb */
- [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 75 Mb */
- [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb*/
- [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb*/
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb*/
- [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb*/
- [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb*/
- [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb*/
- [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-/* 4ms frame limit not used for NG mode. The values filled
- * for HT are the 64K max aggregate limit */
-
-static const struct ath_rate_table ar5416_11ng_ratetable = {
- 72,
- 12, /* MCS start */
- {
- [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
- 900, 0, 2 }, /* 1 Mb */
- [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
- 1900, 1, 4 }, /* 2 Mb */
- [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
- 4900, 2, 11 }, /* 5.5 Mb */
- [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
- 8100, 3, 22 }, /* 11 Mb */
- [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
- 5400, 4, 12 }, /* 6 Mb */
- [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
- 7800, 5, 18 }, /* 9 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10100, 6, 24 }, /* 12 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 14100, 7, 36 }, /* 18 Mb */
- [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17700, 8, 48 }, /* 24 Mb */
- [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23700, 9, 72 }, /* 36 Mb */
- [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 10, 96 }, /* 48 Mb */
- [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 30900, 11, 108 }, /* 54 Mb */
- [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb*/
- [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 72.2 Mb */
- [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb */
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb */
- [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb */
- [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb */
- [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb */
- [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb */
- [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11a_ratetable = {
- 8,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 0, 12},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 1, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 2, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 3, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 4, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 5, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 6, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 7, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11g_ratetable = {
- 12,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
- 900, 0, 2},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
- 1900, 1, 4},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
- 4900, 2, 11},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
- 8100, 3, 22},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 4, 12},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 5, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 6, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 7, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 8, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 9, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 10, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 11, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_rate *rate)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int rix, i, idx = 0;
-
- if (!(rate->flags & IEEE80211_TX_RC_MCS))
- return rate->idx;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
- idx = ath_rc_priv->valid_rate_index[i];
-
- if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
- rate_table->info[idx].ratecode == rate->idx)
- break;
- }
-
- rix = idx;
-
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rix++;
-
- return rix;
-}
-
-static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, j, idx, idx_next;
-
- for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
- for (j = 0; j <= i-1; j++) {
- idx = ath_rc_priv->valid_rate_index[j];
- idx_next = ath_rc_priv->valid_rate_index[j+1];
-
- if (rate_table->info[idx].ratekbps >
- rate_table->info[idx_next].ratekbps) {
- ath_rc_priv->valid_rate_index[j] = idx_next;
- ath_rc_priv->valid_rate_index[j+1] = idx;
- }
- }
- }
-}
-
-static inline
-int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate,
- u8 *next_idx)
-{
- u8 i;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i+1];
- return 1;
- }
- }
-
- /* No more valid rates */
- *next_idx = 0;
-
- return 0;
-}
-
-/* Return 1 if the PHY type is allowed by the given capability flags */
-
-static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
-{
- if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
- return 0;
- if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
- return 0;
- if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG))
- return 0;
- if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
- return 0;
- if (!ignore_cw && WLAN_RC_PHY_HT(phy))
- if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
- return 0;
- return 1;
-}
-
-static inline int
-ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate, u8 *next_idx)
-{
- int8_t i;
-
- for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i-1];
- return 1;
- }
- }
-
- return 0;
-}
-
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, hi = 0;
-
- for (i = 0; i < rate_table->rate_cnt; i++) {
- if (rate_table->info[i].rate_flags & RC_LEGACY) {
- u32 phy = rate_table->info[i].phy;
- u8 valid_rate_count = 0;
-
- if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
-
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[i] = true;
- hi = i;
- }
- }
-
- return hi;
-}
-
-static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
- return false;
-
- if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
- return false;
-
- if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset;
- u32 phy, capflag = ath_rc_priv->ht_cap;
- u16 rate_flags;
- u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
-
- if (legacy)
- rateset = &ath_rc_priv->neg_rates;
- else
- rateset = &ath_rc_priv->neg_ht_rates;
-
- for (i = 0; i < rateset->rs_nrates; i++) {
- for (j = 0; j < rate_table->rate_cnt; j++) {
- phy = rate_table->info[j].phy;
- rate_flags = rate_table->info[j].rate_flags;
- rate = rateset->rs_rates[i];
- dot11rate = rate_table->info[j].dot11rate;
-
- if (legacy &&
- !ath_rc_check_legacy(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!legacy &&
- !ath_rc_check_ht(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!ath_rc_valid_phyrate(phy, capflag, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[j] = true;
- hi = max(hi, j);
- }
- }
-
- return hi;
-}
-
-static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
- int *is_probing)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u32 best_thruput, this_thruput, now_msec;
- u8 rate, next_rate, best_rate, maxindex, minindex;
- int8_t index = 0;
-
- now_msec = jiffies_to_msecs(jiffies);
- *is_probing = 0;
- best_thruput = 0;
- maxindex = ath_rc_priv->max_valid_rate-1;
- minindex = 0;
- best_rate = minindex;
-
- /*
- * Try the higher rate first. It will reduce memory moving time
- * if we have very good channel characteristics.
- */
- for (index = maxindex; index >= minindex ; index--) {
- u8 per_thres;
-
- rate = ath_rc_priv->valid_rate_index[index];
- if (rate > ath_rc_priv->rate_max_phy)
- continue;
-
- /*
- * For TCP the average collision rate is around 11%,
- * so we ignore PERs less than this. This is to
- * prevent the rate we are currently using (whose
- * PER might be in the 10-15 range because of TCP
- * collisions) looking worse than the next lower
- * rate whose PER has decayed close to 0. If we
- * used the next lower rate, its PER would grow to
- * 10-15 and we would be worse off than staying
- * at the current rate.
- */
- per_thres = ath_rc_priv->per[rate];
- if (per_thres < 12)
- per_thres = 12;
-
- this_thruput = rate_table->info[rate].user_ratekbps *
- (100 - per_thres);
-
- if (best_thruput <= this_thruput) {
- best_thruput = this_thruput;
- best_rate = rate;
- }
- }
-
- rate = best_rate;
-
- /*
- * Must check the actual rate (ratekbps) to account for
- * non-monotonicity of 11g's rate table
- */
-
- if (rate >= ath_rc_priv->rate_max_phy) {
- rate = ath_rc_priv->rate_max_phy;
-
- /* Probe the next allowed phy state */
- if (ath_rc_get_nextvalid_txrate(rate_table,
- ath_rc_priv, rate, &next_rate) &&
- (now_msec - ath_rc_priv->probe_time >
- rate_table->probe_interval) &&
- (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
- rate = next_rate;
- ath_rc_priv->probe_rate = rate;
- ath_rc_priv->probe_time = now_msec;
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- *is_probing = 1;
- }
- }
-
- if (rate > (ath_rc_priv->rate_table_size - 1))
- rate = ath_rc_priv->rate_table_size - 1;
-
- if (RC_TS_ONLY(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG))
- return rate;
-
- if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG)))
- return rate;
-
- if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags))
- return rate;
-
- /* This should not happen */
- WARN_ON_ONCE(1);
-
- rate = ath_rc_priv->valid_rate_index[0];
-
- return rate;
-}
-
-static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
- struct ieee80211_tx_rate *rate,
- struct ieee80211_tx_rate_control *txrc,
- u8 tries, u8 rix, int rtsctsenable)
-{
- rate->count = tries;
- rate->idx = rate_table->info[rix].ratecode;
-
- if (txrc->rts || rtsctsenable)
- rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
-
- if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
- rate->flags |= IEEE80211_TX_RC_MCS;
- if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
- conf_is_ht40(&txrc->hw->conf))
- rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
- rate->flags |= IEEE80211_TX_RC_SHORT_GI;
- }
-}
-
-static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ieee80211_tx_info *tx_info)
-{
- struct ieee80211_bss_conf *bss_conf;
-
- if (!tx_info->control.vif)
- return;
- /*
- * For legacy frames, mac80211 takes care of CTS protection.
- */
- if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
- return;
-
- bss_conf = &tx_info->control.vif->bss_conf;
-
- if (!bss_conf->basic_rates)
- return;
-
- /*
- * For now, use the lowest allowed basic rate for HT frames.
- */
- tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
-}
-
-static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- const struct ath_rate_table *rate_table;
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->control.rates;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix;
- int is_probe = 0;
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- /*
- * For Multi Rate Retry we use a different number of
- * retry attempt counts. This ends up looking like this:
- *
- * MRR[0] = 4
- * MRR[1] = 4
- * MRR[2] = 4
- * MRR[3] = 8
- *
- */
- try_per_rate = 4;
-
- rate_table = ath_rc_priv->rate_table;
- rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
- tx_info->flags |= IEEE80211_TX_CTL_LDPC;
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
- tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
-
- if (is_probe) {
- /*
- * Set one try for probe rates. For the
- * probes, don't enable RTS.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- 1, rix, 0);
- /*
- * Get the next tried/allowed rate.
- * No RTS for the next series after the probe rate.
- */
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
-
- tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
- } else {
- /*
- * Set the chosen rate. No RTS for first series entry.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
- }
-
- for ( ; i < 4; i++) {
- /*
- * Use twice the number of tries for the last MRR segment.
- */
- if (i + 1 == 4)
- try_per_rate = 8;
-
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
-
- /*
- * All other rates in the series have RTS enabled.
- */
- ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, rix, 1);
- }
-
- /*
- * NB: Change rate series to enable aggregation when operating
- * at lower MCS rates. When first rate in series is MCS2
- * in HT40 @ 2.4GHz, series should look like:
- *
- * {MCS2, MCS1, MCS0, MCS0}.
- *
- * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
- * look like:
- *
- * {MCS3, MCS2, MCS1, MCS1}
- *
- * So, set fourth rate in series to be same as third one for
- * above conditions.
- */
- if ((sc->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) &&
- (conf_is_ht(&sc->hw->conf))) {
- u8 dot11rate = rate_table->info[rix].dot11rate;
- u8 phy = rate_table->info[rix].phy;
- if (i == 4 &&
- ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
- (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
- rates[3].idx = rates[2].idx;
- rates[3].flags = rates[2].flags;
- }
- }
-
- /*
- * Force hardware to use computed duration for next
- * fragment by disabling multi-rate retry, which
- * updates duration based on the multi-rate duration table.
- *
- * FIXME: Fix duration
- */
- if (ieee80211_has_morefrags(fc) ||
- (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
- rates[1].count = rates[2].count = rates[3].count = 0;
- rates[1].idx = rates[2].idx = rates[3].idx = 0;
- rates[0].count = ATH_TXMAXTRY;
- }
-
- ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
-}
-
-static void ath_rc_update_per(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries,
- u32 now_msec)
-{
- int count, n_bad_frames;
- u8 last_per;
- static const u32 nretry_to_per_lookup[10] = {
- 100 * 0 / 1,
- 100 * 1 / 4,
- 100 * 1 / 2,
- 100 * 3 / 4,
- 100 * 4 / 5,
- 100 * 5 / 6,
- 100 * 6 / 7,
- 100 * 7 / 8,
- 100 * 8 / 9,
- 100 * 9 / 10
- };
-
- last_per = ath_rc_priv->per[tx_rate];
- n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;
-
- if (xretries) {
- if (xretries == 1) {
- ath_rc_priv->per[tx_rate] += 30;
- if (ath_rc_priv->per[tx_rate] > 100)
- ath_rc_priv->per[tx_rate] = 100;
- } else {
- /* xretries == 2 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) + (100 >> 3));
- }
-
- /* xretries == 1 or 2 */
-
- if (ath_rc_priv->probe_rate == tx_rate)
- ath_rc_priv->probe_rate = 0;
-
- } else { /* xretries == 0 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- if (n_bad_frames) {
- /* new_PER = 7/8*old_PER + 1/8*(currentPER)
- * Assuming that n_frames is not 0. The current PER
- * from the retries is 100 * retries / (retries+1),
- * since the first retries attempts failed, and the
- * next one worked. For the one that worked,
- * n_bad_frames subframes out of n_frames failed,
- * so the PER for that part is
- * 100 * n_bad_frames / n_frames, and it contributes
- * 100 * n_bad_frames / (n_frames * (retries+1)) to
- * the above PER. The expression below is a
- * simplified version of the sum of these two terms.
- */
- if (tx_info->status.ampdu_len > 0) {
- int n_frames, n_bad_tries;
- u8 cur_per, new_per;
-
- n_bad_tries = retries * tx_info->status.ampdu_len +
- n_bad_frames;
- n_frames = tx_info->status.ampdu_len * (retries + 1);
- cur_per = (100 * n_bad_tries / n_frames) >> 3;
- new_per = (u8)(last_per - (last_per >> 3) + cur_per);
- ath_rc_priv->per[tx_rate] = new_per;
- }
- } else {
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) +
- (nretry_to_per_lookup[retries] >> 3));
- }
-
-
- /*
- * If this was a probe and it succeeded with no retries,
- * raise the max rate. Otherwise, ignore the probe.
- */
- if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
- if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
- /*
- * Since we probed with just a single attempt,
- * any retries means the probe failed. Also,
- * if the attempt worked, but more than half
- * the subframes were bad then also consider
- * the probe a failure.
- */
- ath_rc_priv->probe_rate = 0;
- } else {
- u8 probe_rate = 0;
-
- ath_rc_priv->rate_max_phy =
- ath_rc_priv->probe_rate;
- probe_rate = ath_rc_priv->probe_rate;
-
- if (ath_rc_priv->per[probe_rate] > 30)
- ath_rc_priv->per[probe_rate] = 20;
-
- ath_rc_priv->probe_rate = 0;
-
- /*
- * Since this probe succeeded, we allow the next
- * probe twice as soon. This allows the maxRate
- * to move up faster if the probes are
- * successful.
- */
- ath_rc_priv->probe_time =
- now_msec - rate_table->probe_interval / 2;
- }
- }
-
- if (retries > 0) {
- /*
- * Don't update anything. We don't know if
- * this was because of collisions or poor signal.
- */
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- } else {
- /*
- * It worked with no retries. First ignore bogus (small)
- * rssi_ack values.
- */
- if (tx_rate == ath_rc_priv->rate_max_phy &&
- ath_rc_priv->hw_maxretry_pktcnt < 255) {
- ath_rc_priv->hw_maxretry_pktcnt++;
- }
-
- }
- }
-}
-
-static void ath_rc_update_ht(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries)
-{
- u32 now_msec = jiffies_to_msecs(jiffies);
- int rate;
- u8 last_per;
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int size = ath_rc_priv->rate_table_size;
-
- if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
- return;
-
- last_per = ath_rc_priv->per[tx_rate];
-
- /* Update PER first */
- ath_rc_update_per(sc, rate_table, ath_rc_priv,
- tx_info, tx_rate, xretries,
- retries, now_msec);
-
- /*
- * If this rate looks bad (high PER) then stop using it for
- * a while (except if we are probing).
- */
- if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
- rate_table->info[tx_rate].ratekbps <=
- rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
- ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
- &ath_rc_priv->rate_max_phy);
-
- /* Don't probe for a little while. */
- ath_rc_priv->probe_time = now_msec;
- }
-
- /* Make sure the rates below this have lower PER */
- /* Monotonicity is kept only for rates below the current rate. */
- if (ath_rc_priv->per[tx_rate] < last_per) {
- for (rate = tx_rate - 1; rate >= 0; rate--) {
-
- if (ath_rc_priv->per[rate] >
- ath_rc_priv->per[rate+1]) {
- ath_rc_priv->per[rate] =
- ath_rc_priv->per[rate+1];
- }
- }
- }
-
- /* Maintain monotonicity for rates above the current rate */
- for (rate = tx_rate; rate < size - 1; rate++) {
- if (ath_rc_priv->per[rate+1] <
- ath_rc_priv->per[rate])
- ath_rc_priv->per[rate+1] =
- ath_rc_priv->per[rate];
- }
-
- /* Every so often, we reduce the thresholds
- * and PER (different for CCK and OFDM). */
- if (now_msec - ath_rc_priv->per_down_time >=
- rate_table->probe_interval) {
- for (rate = 0; rate < size; rate++) {
- ath_rc_priv->per[rate] =
- 7 * ath_rc_priv->per[rate] / 8;
- }
-
- ath_rc_priv->per_down_time = now_msec;
- }
-
- ath_debug_stat_retries(ath_rc_priv, tx_rate, xretries, retries,
- ath_rc_priv->per[tx_rate]);
-
-}
-
-static void ath_rc_tx_status(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->status.rates;
- struct ieee80211_tx_rate *rate;
- int final_ts_idx = 0, xretries = 0, long_retry = 0;
- u8 flags;
- u32 i = 0, rix;
-
- for (i = 0; i < sc->hw->max_rates; i++) {
- rate = &tx_info->status.rates[i];
- if (rate->idx < 0 || !rate->count)
- break;
-
- final_ts_idx = i;
- long_retry = rate->count - 1;
- }
-
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
- xretries = 1;
-
- /*
- * If the first rate is not the final index, there
- * are intermediate rate failures to be processed.
- */
- if (final_ts_idx != 0) {
- for (i = 0; i < final_ts_idx ; i++) {
- if (rates[i].count != 0 && (rates[i].idx >= 0)) {
- flags = rates[i].flags;
-
- /* If HT40 and we have switched mode from
- * 40 to 20 => don't update */
-
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info,
- rix, xretries ? 1 : 2,
- rates[i].count);
- }
- }
- }
-
- flags = rates[final_ts_idx].flags;
-
- /* If HT40 and we have switched mode from 40 to 20 => don't update */
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
- ath_debug_stat_rc(ath_rc_priv, rix);
-}
-
-static const
-struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
- enum ieee80211_band band,
- bool is_ht)
-{
- switch(band) {
- case IEEE80211_BAND_2GHZ:
- if (is_ht)
- return &ar5416_11ng_ratetable;
- return &ar5416_11g_ratetable;
- case IEEE80211_BAND_5GHZ:
- if (is_ht)
- return &ar5416_11na_ratetable;
- return &ar5416_11a_ratetable;
- default:
- return NULL;
- }
-}
-
-static void ath_rc_init(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 i, j, k, hi = 0, hthi = 0;
-
- ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
-
- for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
- ath_rc_priv->per[i] = 0;
- ath_rc_priv->valid_rate_index[i] = 0;
- }
-
- for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < RATE_TABLE_SIZE; j++)
- ath_rc_priv->valid_phy_rateidx[i][j] = 0;
- ath_rc_priv->valid_phy_ratecnt[i] = 0;
- }
-
- if (!rateset->rs_nrates) {
- hi = ath_rc_init_validrates(ath_rc_priv);
- } else {
- hi = ath_rc_setvalid_rates(ath_rc_priv, true);
-
- if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
- hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
-
- hi = max(hi, hthi);
- }
-
- ath_rc_priv->rate_table_size = hi + 1;
- ath_rc_priv->rate_max_phy = 0;
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-
- for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
- ath_rc_priv->valid_rate_index[k++] =
- ath_rc_priv->valid_phy_rateidx[i][j];
- }
-
- if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
- !ath_rc_priv->valid_phy_ratecnt[i])
- continue;
-
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
- }
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
- WARN_ON(k > RATE_TABLE_SIZE);
-
- ath_rc_priv->max_valid_rate = k;
- ath_rc_sort_validrates(ath_rc_priv);
- ath_rc_priv->rate_max_phy = (k > 4) ?
- ath_rc_priv->valid_rate_index[k-4] :
- ath_rc_priv->valid_rate_index[k-1];
-
- ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
- ath_rc_priv->ht_cap);
-}
-
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
-{
- u8 caps = 0;
-
- if (sta->ht_cap.ht_supported) {
- caps = WLAN_RC_HT_FLAG;
- if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2])
- caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
- else if (sta->ht_cap.mcs.rx_mask[1])
- caps |= WLAN_RC_DS_FLAG;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
- caps |= WLAN_RC_40_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- caps |= WLAN_RC_SGI_FLAG;
- } else {
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- caps |= WLAN_RC_SGI_FLAG;
- }
- }
-
- return caps;
-}
-
-static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
- u8 tidno)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- struct ath_atx_tid *txtid;
-
- if (!sta->ht_cap.ht_supported)
- return false;
-
- txtid = ATH_AN_2_TID(an, tidno);
- return !txtid->active;
-}
-
-
-/***********************************/
-/* mac80211 Rate Control callbacks */
-/***********************************/
-
-static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
-
- if (!priv_sta || !ieee80211_is_data(fc))
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
- return;
-
- ath_rc_tx_status(sc, ath_rc_priv, skb);
-
- /* Check if aggregation has to be enabled for this tid */
- if (conf_is_ht(&sc->hw->conf) &&
- !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- if (ieee80211_is_data_qos(fc) &&
- skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
- u8 *qc, tid;
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
-
- if(ath_tx_aggr_check(sc, sta, tid))
- ieee80211_start_tx_ba_session(sta, tid, 0);
- }
- }
-}
-
-static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta)
-{
- struct ath_softc *sc = priv;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- int i, j = 0;
- u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sta->supp_rates[sband->band] & BIT(i)) {
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
-
- ath_rc_priv->neg_rates.rs_rates[j]
- = (sband->bitrates[i].bitrate * 2) / 10;
- j++;
- }
- }
- ath_rc_priv->neg_rates.rs_nrates = j;
-
- if (sta->ht_cap.ht_supported) {
- for (i = 0, j = 0; i < 77; i++) {
- if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
- ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
- if (j == ATH_RATE_MAX)
- break;
- }
- ath_rc_priv->neg_ht_rates.rs_nrates = j;
- }
-
- ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
- sta->ht_cap.ht_supported);
- if (!ath_rc_priv->rate_table) {
- ath_err(common, "No rate table chosen\n");
- return;
- }
-
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-}
-
-static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
-
- if (changed & IEEE80211_RC_BW_CHANGED) {
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-
- ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
- "Operating Bandwidth changed to: %d\n",
- sc->hw->conf.chandef.width);
- }
-}
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
- struct ath_rc_stats *stats;
-
- stats = &rc->rcstats[final_rate];
- stats->success++;
-}
-
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
- struct ath_rc_stats *stats = &rc->rcstats[rix];
-
- stats->xretries += xretries;
- stats->retries += retries;
- stats->per = per;
-}
-
-static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_rate_priv *rc = file->private_data;
- char *buf;
- unsigned int len = 0, max;
- int rix;
- ssize_t retval;
-
- if (rc->rate_table == NULL)
- return 0;
-
- max = 80 + rc->rate_table_size * 1024 + 1;
- buf = kmalloc(max, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += sprintf(buf, "%6s %6s %6s "
- "%10s %10s %10s %10s\n",
- "HT", "MCS", "Rate",
- "Success", "Retries", "XRetries", "PER");
-
- for (rix = 0; rix < rc->max_valid_rate; rix++) {
- u8 i = rc->valid_rate_index[rix];
- u32 ratekbps = rc->rate_table->info[i].ratekbps;
- struct ath_rc_stats *stats = &rc->rcstats[i];
- char mcs[5];
- char htmode[5];
- int used_mcs = 0, used_htmode = 0;
-
- if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = scnprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
-
- if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT40");
- else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT20");
- else
- used_htmode = scnprintf(htmode, 5, "????");
- }
-
- mcs[used_mcs] = '\0';
- htmode[used_htmode] = '\0';
-
- len += scnprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
- }
-
- if (len > max)
- len = max;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return retval;
-}
-
-static const struct file_operations fops_rcstat = {
- .read = read_file_rcstat,
- .open = simple_open,
- .owner = THIS_MODULE
-};
-
-static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct ath_rate_priv *rc = priv_sta;
- rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
- dir, rc, &fops_rcstat);
-}
-
-static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct ath_rate_priv *rc = priv_sta;
- debugfs_remove(rc->debugfs_rcstats);
-}
-
-#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
-
-static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-static void ath_rate_free(void *priv)
-{
- return;
-}
-
-static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- return kzalloc(sizeof(struct ath_rate_priv), gfp);
-}
-
-static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct ath_rate_priv *rate_priv = priv_sta;
- kfree(rate_priv);
-}
-
-static struct rate_control_ops ath_rate_ops = {
- .module = NULL,
- .name = "ath9k_rate_control",
- .tx_status = ath_tx_status,
- .get_rate = ath_get_rate,
- .rate_init = ath_rate_init,
- .rate_update = ath_rate_update,
- .alloc = ath_rate_alloc,
- .free = ath_rate_free,
- .alloc_sta = ath_rate_alloc_sta,
- .free_sta = ath_rate_free_sta,
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- .add_sta_debugfs = ath_rate_add_sta_debugfs,
- .remove_sta_debugfs = ath_rate_remove_sta_debugfs,
-#endif
-};
-
-int ath_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&ath_rate_ops);
-}
-
-void ath_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&ath_rate_ops);
-}
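
A minimal standalone sketch of the two ideas the removed rate control above is built on may help when reviewing this deletion (illustration only; the names example_rate, update_per and pick_best_rate are invented and not part of the driver): the per-rate PER is smoothed as new_PER = 7/8 * old_PER + 1/8 * current_PER using shifts, and the transmit rate is the one maximizing rate * (100 - PER), with the PER floored at 12% to mask TCP-collision noise.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's per-rate state. */
struct example_rate {
        unsigned int ratekbps;  /* nominal rate in kbit/s */
        unsigned int per;       /* smoothed packet error rate, 0..100 */
};

/*
 * EWMA as in ath_rc_update_per(): keep 7/8 of the old PER and mix in
 * 1/8 of the PER observed for the latest transmission, using shifts.
 */
static unsigned int update_per(unsigned int old_per, unsigned int cur_per)
{
        return old_per - (old_per >> 3) + (cur_per >> 3);
}

/*
 * Rate selection as in ath_rc_get_highest_rix(): highest expected
 * throughput wins, with each candidate's PER floored at 12% so that
 * TCP-collision noise on the current rate does not make it look worse
 * than a lower rate whose PER has decayed to zero.
 */
static int pick_best_rate(const struct example_rate *rates, int n)
{
        unsigned int best_tp = 0;
        int i, best = 0;

        for (i = 0; i < n; i++) {
                unsigned int per = rates[i].per < 12 ? 12 : rates[i].per;
                unsigned int tp = rates[i].ratekbps * (100 - per);

                if (tp >= best_tp) {
                        best_tp = tp;
                        best = i;
                }
        }
        return best;
}

int main(void)
{
        struct example_rate rates[] = {
                { 39000, 0 },   /* clean but slower rate */
                { 52000, 14 },  /* faster rate with some loss */
        };

        /* 52000 * 86 = 4472000 beats 39000 * 88 = 3432000, so the lossy
         * 52 Mb rate is still preferred over the clean 39 Mb rate. */
        printf("best index: %d\n", pick_best_rate(rates, 2));

        /* One completely failed burst (current PER 100) pulls a
         * smoothed PER of 40 up to 40 - 5 + 12 = 47. */
        printf("updated PER: %u\n", update_per(40, 100));
        return 0;
}
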
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
deleted file mode 100644
index b9a87383cb43..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2004 Sam Leffler, Errno Consulting
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2008-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef RC_H
-#define RC_H
-
-#include "hw.h"
-
-struct ath_softc;
-
-#define ATH_RATE_MAX 30
-#define RATE_TABLE_SIZE 72
-
-#define RC_INVALID 0x0000
-#define RC_LEGACY 0x0001
-#define RC_SS 0x0002
-#define RC_DS 0x0004
-#define RC_TS 0x0008
-#define RC_HT_20 0x0010
-#define RC_HT_40 0x0020
-
-#define RC_STREAM_MASK 0xe
-#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \
- (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
-#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS)
-#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY))
-
-#define RC_HT_2040 (RC_HT_20 | RC_HT_40)
-#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS)
-#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS)
-#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS)
-#define RC_HT_S_20 (RC_HT_20 | RC_SS)
-#define RC_HT_D_20 (RC_HT_20 | RC_DS)
-#define RC_HT_T_20 (RC_HT_20 | RC_TS)
-#define RC_HT_S_40 (RC_HT_40 | RC_SS)
-#define RC_HT_D_40 (RC_HT_40 | RC_DS)
-#define RC_HT_T_40 (RC_HT_40 | RC_TS)
-
-#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS)
-#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS)
-#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS)
-#define RC_HT_DT_40 (RC_HT_40 | RC_DS | RC_TS)
-
-#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS)
-#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
-
-#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS)
-#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS)
-
-#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
-
-enum {
- WLAN_RC_PHY_OFDM,
- WLAN_RC_PHY_CCK,
- WLAN_RC_PHY_HT_20_SS,
- WLAN_RC_PHY_HT_20_DS,
- WLAN_RC_PHY_HT_20_TS,
- WLAN_RC_PHY_HT_40_SS,
- WLAN_RC_PHY_HT_40_DS,
- WLAN_RC_PHY_HT_40_TS,
- WLAN_RC_PHY_HT_20_SS_HGI,
- WLAN_RC_PHY_HT_20_DS_HGI,
- WLAN_RC_PHY_HT_20_TS_HGI,
- WLAN_RC_PHY_HT_40_SS_HGI,
- WLAN_RC_PHY_HT_40_DS_HGI,
- WLAN_RC_PHY_HT_40_TS_HGI,
- WLAN_RC_PHY_MAX
-};
-
-#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
-#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
-#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-
-#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
-
-#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
- ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
-
-#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
- (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
-
-/* Return TRUE if flag supports HT20 && client supports HT20 or
- * return TRUE if flag supports HT40 && client supports HT40.
- * This is used because some rates overlap between HT20/HT40.
- */
-#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
- (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
- ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
-
-#define WLAN_RC_DS_FLAG (0x01)
-#define WLAN_RC_TS_FLAG (0x02)
-#define WLAN_RC_40_FLAG (0x04)
-#define WLAN_RC_SGI_FLAG (0x08)
-#define WLAN_RC_HT_FLAG (0x10)
-
-/**
- * struct ath_rate_table - Rate Control table
- * @rate_cnt: total number of rates for the given wireless mode
- * @mcs_start: MCS rate index offset
- * @rate_flags: Rate Control flags
- * @phy: CCK/OFDM/HT20/HT40
- * @ratekbps: rate in Kbits per second
- * @user_ratekbps: user rate in Kbits per second
- * @ratecode: rate that goes into HW descriptors
- * @dot11rate: value that goes into supported
- * rates info element of MLME
- * @ctrl_rate: Index of next lower basic rate, used for duration computation
- * @cw40index: Index of rates having 40MHz channel width
- * @sgi_index: Index of rates having Short Guard Interval
- * @ht_index: high throughput rates having 40MHz channel width and
- * Short Guard Interval
- * @probe_interval: interval for rate control to probe for other rates
- * @initial_ratemax: initial ratemax value
- */
-struct ath_rate_table {
- int rate_cnt;
- int mcs_start;
- struct {
- u16 rate_flags;
- u8 phy;
- u32 ratekbps;
- u32 user_ratekbps;
- u8 ratecode;
- u8 dot11rate;
- } info[RATE_TABLE_SIZE];
- u32 probe_interval;
- u8 initial_ratemax;
-};
-
-struct ath_rateset {
- u8 rs_nrates;
- u8 rs_rates[ATH_RATE_MAX];
-};
-
-struct ath_rc_stats {
- u32 success;
- u32 retries;
- u32 xretries;
- u8 per;
-};
-
-/**
- * struct ath_rate_priv - Rate Control priv data
- * @state: RC state
- * @probe_rate: rate we are probing at
- * @probe_time: msec timestamp for last probe
- * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
- * @max_valid_rate: maximum number of valid rates
- * @per_down_time: msec timestamp for last PER down step
- * @valid_phy_ratecnt: valid rate count
- * @rate_max_phy: phy index for the max rate
- * @per: PER for every valid rate in %
- * @probe_interval: interval for ratectrl to probe for other rates
- * @ht_cap: HT capabilities
- * @neg_rates: Negotiated rates
- * @neg_ht_rates: Negotiated HT rates
- */
-struct ath_rate_priv {
- u8 rate_table_size;
- u8 probe_rate;
- u8 hw_maxretry_pktcnt;
- u8 max_valid_rate;
- u8 valid_rate_index[RATE_TABLE_SIZE];
- u8 ht_cap;
- u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
- u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
- u8 rate_max_phy;
- u8 per[RATE_TABLE_SIZE];
- u32 probe_time;
- u32 per_down_time;
- u32 probe_interval;
- struct ath_rateset neg_rates;
- struct ath_rateset neg_ht_rates;
- const struct ath_rate_table *rate_table;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- struct dentry *debugfs_rcstats;
- struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
-#endif
-};
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per);
-#else
-static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-}
-static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
-}
-#endif
-
-#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
-int ath_rate_control_register(void);
-void ath_rate_control_unregister(void);
-#else
-static inline int ath_rate_control_register(void)
-{
- return 0;
-}
-
-static inline void ath_rate_control_unregister(void)
-{
-}
-#endif
-
-#endif /* RC_H */
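
As a worked example of how the capability bits and selection macros in the header above combine (the station here is invented for illustration): a two-stream HT station negotiated at 40 MHz with short GI gets capflag = WLAN_RC_HT_FLAG | WLAN_RC_40_FLAG | WLAN_RC_SGI_FLAG | WLAN_RC_DS_FLAG = 0x1d, for which WLAN_RC_CAP_MODE() selects RC_HT_40 and WLAN_RC_CAP_STREAM() selects RC_DS. The standalone sketch below copies the relevant definitions from the header so it compiles on its own.

#include <stdio.h>

/* Definitions copied from the header above. */
#define RC_LEGACY       0x0001
#define RC_SS           0x0002
#define RC_DS           0x0004
#define RC_TS           0x0008
#define RC_HT_20        0x0010
#define RC_HT_40        0x0020

#define WLAN_RC_DS_FLAG         (0x01)
#define WLAN_RC_TS_FLAG         (0x02)
#define WLAN_RC_40_FLAG         (0x04)
#define WLAN_RC_SGI_FLAG        (0x08)
#define WLAN_RC_HT_FLAG         (0x10)

#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
        ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))

#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
        (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))

int main(void)
{
        /* Hypothetical 2-stream station: HT, 40 MHz, short GI. */
        unsigned int capflag = WLAN_RC_HT_FLAG | WLAN_RC_40_FLAG |
                               WLAN_RC_SGI_FLAG | WLAN_RC_DS_FLAG;

        /* Prints: capflag=0x1d mode=0x20 stream=0x4, i.e. RC_HT_40 and
         * RC_DS; only table entries whose rate_flags satisfy these are
         * accepted by ath_rc_check_legacy()/ath_rc_check_ht(). */
        printf("capflag=0x%x mode=0x%x stream=0x%x\n", capflag,
               WLAN_RC_CAP_MODE(capflag), WLAN_RC_CAP_STREAM(capflag));
        return 0;
}
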
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a0ebdd000fc2..076dae1e5ab7 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -755,204 +755,6 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
return bf;
}
-/* Assumes you've already done the endian to CPU conversion */
-static bool ath9k_rx_accept(struct ath_common *common,
- struct ieee80211_hdr *hdr,
- struct ieee80211_rx_status *rxs,
- struct ath_rx_status *rx_stats,
- bool *decrypt_error)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- bool is_mc, is_valid_tkip, strip_mic, mic_error;
- struct ath_hw *ah = common->ah;
- __le16 fc;
-
- fc = hdr->frame_control;
-
- is_mc = !!is_multicast_ether_addr(hdr->addr1);
- is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
- test_bit(rx_stats->rs_keyix, common->tkip_keymap);
- strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
- ieee80211_has_protected(fc) &&
- !(rx_stats->rs_status &
- (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS));
-
- /*
- * Key miss events are only relevant for pairwise keys where the
- * descriptor does contain a valid key index. This has been observed
- * mostly with CCMP encryption.
- */
- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
- !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
- rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
-
- mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
- !ieee80211_has_morefrags(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- (rx_stats->rs_status & ATH9K_RXERR_MIC);
-
- /*
- * The rx_stats->rs_status will not be set until the end of the
- * chained descriptors so it can be ignored if rs_more is set. The
- * rs_more will be false at the last element of the chained
- * descriptors.
- */
- if (rx_stats->rs_status != 0) {
- u8 status_mask;
-
- if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
- rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
- mic_error = false;
- }
-
- if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
- (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
- *decrypt_error = true;
- mic_error = false;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS;
-
- if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
- status_mask |= ATH9K_RXERR_CRC;
-
- if (rx_stats->rs_status & ~status_mask)
- return false;
- }
-
- /*
- * For unicast frames the MIC error bit can have false positives,
- * so all MIC error reports need to be validated in software.
- * False negatives are not common, so skip software verification
- * if the hardware considers the MIC valid.
- */
- if (strip_mic)
- rxs->flag |= RX_FLAG_MMIC_STRIPPED;
- else if (is_mc && mic_error)
- rxs->flag |= RX_FLAG_MMIC_ERROR;
-
- return true;
-}
-
-static int ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
- struct ath_softc __maybe_unused *sc = common->priv;
- struct ath_hw *ah = sc->sc_ah;
-
- band = ah->curchan->chan->band;
- sband = hw->wiphy->bands[band];
-
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_5MHZ;
- else if (IS_CHAN_HALF_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_10MHZ;
-
- if (rx_stats->rs_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- rxs->flag |= rx_stats->flag;
- rxs->rate_idx = rx_stats->rs_rate & 0x7f;
- return 0;
- }
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
- rxs->rate_idx = i;
- return 0;
- }
- if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
- rxs->flag |= RX_FLAG_SHORTPRE;
- rxs->rate_idx = i;
- return 0;
- }
- }
-
- /*
- * No valid hardware bitrate found -- we should not get here
- * because hardware has already validated this frame as OK.
- */
- ath_dbg(common, ANY,
- "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
- rx_stats->rs_rate);
- RX_STAT_INC(rx_rate_err);
- return -EINVAL;
-}
-
-static void ath9k_process_rssi(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = common->ah;
- int last_rssi;
- int rssi = rx_stats->rs_rssi;
- int i, j;
-
- /*
- * RSSI is not available for subframes in an A-MPDU.
- */
- if (rx_stats->rs_moreaggr) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- /*
- * Check if the RSSI for the last subframe in an A-MPDU
- * or an unaggregated frame is valid.
- */
- if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
- s8 rssi;
-
- if (!(ah->rxchainmask & BIT(i)))
- continue;
-
- rssi = rx_stats->rs_rssi_ctl[i];
- if (rssi != ATH9K_RSSI_BAD) {
- rxs->chains |= BIT(j);
- rxs->chain_signal[j] = ah->noise + rssi;
- }
- j++;
- }
-
- /*
- * Update Beacon RSSI, this is used by ANI.
- */
- if (rx_stats->is_mybeacon &&
- ((ah->opmode == NL80211_IFTYPE_STATION) ||
- (ah->opmode == NL80211_IFTYPE_ADHOC))) {
- ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
- last_rssi = sc->last_rssi;
-
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
- if (rssi < 0)
- rssi = 0;
-
- ah->stats.avgbrssi = rssi;
- }
-
- rxs->signal = ah->noise + rx_stats->rs_rssi;
-}
-
static void ath9k_process_tsf(struct ath_rx_status *rs,
struct ieee80211_rx_status *rxs,
u64 tsf)
@@ -1051,7 +853,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter)) {
ret = -EINVAL;
goto exit;
}
@@ -1069,12 +871,19 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
goto exit;
}
- if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
+ if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
+ ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
+ rx_stats->rs_rate);
+ RX_STAT_INC(rx_rate_err);
ret =-EINVAL;
goto exit;
}
- ath9k_process_rssi(common, hw, rx_stats, rx_status);
+ ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
rx_status->band = ah->curchan->chan->band;
rx_status->freq = ah->curchan->chan->center_freq;
@@ -1092,57 +901,6 @@ exit:
return ret;
}
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error)
-{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- int hdrlen, padpos, padsize;
- u8 keyix;
- __le16 fc;
-
- /* see if any padding is done by the hw and remove it */
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
- padpos = ieee80211_hdrlen(fc);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - padpos % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- padsize = padpos & 3;
- if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
- }
-
- keyix = rx_stats->rs_keyix;
-
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
- ieee80211_has_protected(fc)) {
- rxs->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc)
- && !decrypt_error && skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
-
- if (test_bit(keyix, common->keymap))
- rxs->flag |= RX_FLAG_DECRYPTED;
- }
- if (ah->sw_mgmt_crypto &&
- (rxs->flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(fc))
- /* Use software decrypt for management frames. */
- rxs->flag &= ~RX_FLAG_DECRYPTED;
-}
-
/*
* Run the LNA combining algorithm only in these cases:
*
@@ -1292,8 +1050,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb_pull(skb, ah->caps.rx_status_len);
if (!rs.rs_more)
- ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
- rxs, decrypt_error);
+ ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
if (rs.rs_more) {
RX_STAT_INC(rx_frags);
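
The padding logic moved out of ath9k_rx_skb_postprocess() above hinges on the
observation in its comment: for the even-length 802.11 headers the driver sees,
padsize = padpos & 3 gives the same result as the general formula
(4 - padpos % 4) % 4. A standalone sketch (illustrative only, not part of the
patch) that checks this for typical even header lengths:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* typical even 802.11 MAC header lengths, in bytes */
    const int hdrlens[] = { 24, 26, 28, 30, 32, 36 };
    unsigned int i;

    for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++) {
        int padpos = hdrlens[i];
        int general = (4 - padpos % 4) % 4;   /* general formula */
        int shortcut = padpos & 3;            /* shortcut used in the driver */

        assert(general == shortcut);
        printf("hdrlen %2d -> pad %d\n", padpos, shortcut);
    }
    return 0;
}

For odd header lengths the two expressions differ, which is exactly why the
comment stresses that only even-length headers are handled.
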
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0a75e2f68c9d..a6507046dfe8 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2566,7 +2566,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
- ath9k_csa_is_finished(sc);
+ ath9k_csa_update(sc);
continue;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index ee25786b4447..73f12f196f14 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -44,6 +44,14 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
writel(data, wcn->mmio + addr);
}
+#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
+do { \
+ if (wcn->chip_version == WCN36XX_CHIP_3680) \
+ wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
+ else \
+ wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
+} while (0) \
+
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
*data = readl(wcn->mmio + addr);
@@ -680,7 +688,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/* Setting interrupt path */
reg_data = WCN36XX_DXE_CCU_INT;
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+ wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
/***************************************/
/* Init descriptors for TX LOW channel */
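
The new wcn36xx_dxe_write_register_x() macro above selects the chip-specific
CCU interrupt register by pasting a _3660 or _3680 suffix onto the register
name with the ## operator. A userspace sketch of the same pattern (demo names,
not driver code), reusing the register values introduced in dxe.h below:

#include <stdio.h>

#define DEMO_REG_CCU_INT_3660   0x200b10    /* value from dxe.h below */
#define DEMO_REG_CCU_INT_3680   0x2050dc    /* value from dxe.h below */

#define demo_write_register_x(is_3680, reg, data)                        \
do {                                                                     \
    if (is_3680)                                                         \
        printf("write 0x%x -> reg 0x%x\n", (data), reg ## _3680);        \
    else                                                                 \
        printf("write 0x%x -> reg 0x%x\n", (data), reg ## _3660);        \
} while (0)

int main(void)
{
    demo_write_register_x(0, DEMO_REG_CCU_INT, 0xA0011);    /* -> 0x200b10 */
    demo_write_register_x(1, DEMO_REG_CCU_INT, 0xA0011);    /* -> 0x2050dc */
    return 0;
}
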
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index c88562f85de1..35ee7e966bd2 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -28,11 +28,11 @@ H2H_TEST_RX_TX = DMA2
*/
/* DXE registers */
-#define WCN36XX_DXE_MEM_BASE 0x03000000
#define WCN36XX_DXE_MEM_REG 0x202000
#define WCN36XX_DXE_CCU_INT 0xA0011
-#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
/* TODO This must calculated properly but not hardcoded */
#define WCN36XX_DXE_CTRL_TX_L 0x328a44
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 3c2ef0c32f72..a1f1127d7808 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -4384,11 +4384,13 @@ enum place_holder_in_cap_bitmap {
MAX_FEATURE_SUPPORTED = 128,
};
+#define WCN36XX_HAL_CAPS_SIZE 4
+
struct wcn36xx_hal_feat_caps_msg {
struct wcn36xx_hal_msg_header header;
- u32 feat_caps[4];
+ u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
} __packed;
/* status codes to help debug rekey failures */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e64a6784079e..4ab5370ab7a6 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "wcn36xx.h"
@@ -177,6 +178,60 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
+static const char * const wcn36xx_caps_names[] = {
+ "MCC", /* 0 */
+ "P2P", /* 1 */
+ "DOT11AC", /* 2 */
+ "SLM_SESSIONIZATION", /* 3 */
+ "DOT11AC_OPMODE", /* 4 */
+ "SAP32STA", /* 5 */
+ "TDLS", /* 6 */
+ "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
+ "WLANACTIVE_OFFLOAD", /* 8 */
+ "BEACON_OFFLOAD", /* 9 */
+ "SCAN_OFFLOAD", /* 10 */
+ "ROAM_OFFLOAD", /* 11 */
+ "BCN_MISS_OFFLOAD", /* 12 */
+ "STA_POWERSAVE", /* 13 */
+ "STA_ADVANCED_PWRSAVE", /* 14 */
+ "AP_UAPSD", /* 15 */
+ "AP_DFS", /* 16 */
+ "BLOCKACK", /* 17 */
+ "PHY_ERR", /* 18 */
+ "BCN_FILTER", /* 19 */
+ "RTT", /* 20 */
+ "RATECTRL", /* 21 */
+ "WOW" /* 22 */
+};
+
+static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_caps_names[x];
+}
+
+static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
+{
+ int i;
+
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (get_feat_caps(wcn->fw_feat_caps, i))
+ wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
+ }
+}
+
+static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
+{
+ if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
+ wcn36xx_info("Chip is 3680\n");
+ wcn->chip_version = WCN36XX_CHIP_3680;
+ } else {
+ wcn36xx_info("Chip is 3660\n");
+ wcn->chip_version = WCN36XX_CHIP_3660;
+ }
+}
+
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@@ -223,6 +278,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
goto out_free_smd_buf;
}
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ else
+ wcn36xx_feat_caps_info(wcn);
+ }
+
+ wcn36xx_detect_chip_version(wcn);
+
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@@ -232,11 +297,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_debugfs_init(wcn);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_feature_caps_exchange(wcn);
- if (ret)
- wcn36xx_warn("Exchange feature caps failed\n");
- }
INIT_LIST_HEAD(&wcn->vif_list);
return 0;
@@ -648,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->enable_beacon);
if (bss_conf->enable_beacon) {
+ vif_priv->dtim_period = bss_conf->dtim_period;
vif_priv->bss_index = 0xff;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
@@ -992,6 +1053,7 @@ static int wcn36xx_remove(struct platform_device *pdev)
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+ release_firmware(wcn->nv);
mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
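
wcn36xx_feat_caps_info() and wcn36xx_detect_chip_version() above read
individual capability bits out of wcn->fw_feat_caps, a 4 x u32 array filled in
by the feature-caps exchange in smd.c. A minimal userspace sketch of the
index-to-word/bit split that get_feat_caps() is assumed to perform
(illustration only, not the driver code):

#include <stdint.h>
#include <stdio.h>

#define DEMO_CAPS_WORDS 4    /* mirrors WCN36XX_HAL_CAPS_SIZE */

static int demo_get_cap(const uint32_t *bitmap, int cap)
{
    if (cap < 0 || cap >= DEMO_CAPS_WORDS * 32)
        return 0;
    /* word index and bit index within that word */
    return !!(bitmap[cap / 32] & (1u << (cap % 32)));
}

int main(void)
{
    uint32_t caps[DEMO_CAPS_WORDS] = { 0 };

    caps[0] |= 1u << 2;    /* bit 2 = "DOT11AC" in the name table above */
    printf("DOT11AC: %d\n", demo_get_cap(caps, 2));  /* 1 -> detected as 3680 */
    printf("WOW:     %d\n", demo_get_cap(caps, 22)); /* 0 */
    return 0;
}
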
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 750626b0e22d..7bf0ef8a1f56 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -195,9 +195,11 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
int ret = 0;
+ unsigned long start;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
init_completion(&wcn->hal_rsp_compl);
+ start = jiffies;
ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
@@ -205,10 +207,13 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
}
if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
- wcn36xx_err("Timeout while waiting SMD response\n");
+ wcn36xx_err("Timeout! No SMD response in %dms\n",
+ HAL_MSG_TIMEOUT);
ret = -ETIME;
goto out;
}
+ wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
+ jiffies_to_msecs(jiffies - start));
out:
return ret;
}
@@ -246,21 +251,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
- const struct firmware *nv;
struct nv_data *nv_d;
struct wcn36xx_hal_nv_img_download_req_msg msg_body;
int fw_bytes_left;
int ret;
u16 fm_offset = 0;
- ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
- if (ret) {
- wcn36xx_err("Failed to load nv file %s: %d\n",
- WLAN_NV_FILE, ret);
- goto out_free_nv;
+ if (!wcn->nv) {
+ ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out;
+ }
}
- nv_d = (struct nv_data *)nv->data;
+ nv_d = (struct nv_data *)wcn->nv->data;
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
@@ -270,7 +276,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
do {
- fw_bytes_left = nv->size - fm_offset - 4;
+ fw_bytes_left = wcn->nv->size - fm_offset - 4;
if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
msg_body.last_fragment = 0;
msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
@@ -308,10 +314,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
out_unlock:
mutex_unlock(&wcn->hal_mutex);
-out_free_nv:
- release_firmware(nv);
-
- return ret;
+out: return ret;
}
static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
@@ -899,11 +902,12 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
sta_priv->sta_index = params->sta_index;
sta_priv->dpu_desc_index = params->dpu_index;
+ sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ "hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
params->status, params->sta_index, params->bssid_index,
- params->p2p);
+ params->uc_ucast_sig, params->p2p);
return 0;
}
@@ -1118,7 +1122,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
}
- priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+ priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
@@ -1637,12 +1641,12 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
- wcn36xx_err("Sending hal_exit_bmps failed\n");
+ wcn36xx_err("Sending hal_keep_alive failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
- wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
goto out;
}
out:
@@ -1682,8 +1686,7 @@ out:
return ret;
}
-static inline void set_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1697,8 +1700,7 @@ static inline void set_feat_caps(u32 *bitmap,
bitmap[arr_idx] |= (1 << bit_idx);
}
-static inline int get_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
int ret = 0;
@@ -1714,8 +1716,7 @@ static inline int get_feat_caps(u32 *bitmap,
return ret;
}
-static inline void clear_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1731,8 +1732,8 @@ static inline void clear_feat_caps(u32 *bitmap,
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
- struct wcn36xx_hal_feat_caps_msg msg_body;
- int ret = 0;
+ struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
+ int ret = 0, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -1746,12 +1747,15 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
- if (ret) {
- wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
- ret);
+ if (wcn->hal_rsp_len != sizeof(*rsp)) {
+ wcn36xx_err("Invalid hal_feature_caps_exchange response");
goto out;
}
+
+ rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
+
+ for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
+ wcn->fw_feat_caps[i] = rsp->feat_caps[i];
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
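
The timing added to wcn36xx_smd_send_and_wait() above records jiffies before
issuing the command and reports the elapsed time with jiffies_to_msecs() once
the response completion fires; together with the HAL_MSG_TIMEOUT bump to 500 ms
in smd.h below, this makes slow-but-successful exchanges visible. A condensed
sketch of the pattern in isolation (kernel context assumed, pr_debug() standing
in for wcn36xx_dbg()):

#include <linux/jiffies.h>
#include <linux/printk.h>

static int demo_send_and_time(int (*send)(void))
{
    unsigned long start = jiffies;
    int ret;

    ret = send();    /* issue the command */
    if (ret)
        return ret;

    /* ... wait_for_completion_timeout(..., msecs_to_jiffies(500)) ... */

    pr_debug("command completed in %ums\n",
             jiffies_to_msecs(jiffies - start));
    return 0;
}
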
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index e7c39019c6f1..008d03423dbf 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
-#define HAL_MSG_TIMEOUT 200
+#define HAL_MSG_TIMEOUT 500
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info be contained in the rsp msg */
@@ -112,6 +112,9 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index b2b60e30caaf..32bb26a0db2a 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,8 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
- status.flag, status.vendor_radiotap_len);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
@@ -132,6 +131,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
struct ieee80211_vif,
drv_priv);
+ bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
bd->sta_index = sta_priv->bss_sta_index;
bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
@@ -145,10 +145,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
__vif_priv = get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
- bd->dpu_sign = __vif_priv->ucast_dpu_signature;
-
if (ieee80211_is_nullfunc(hdr->frame_control) ||
(sta_priv && !sta_priv->is_data_encrypted))
bd->dpu_ne = 1;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 8fa5cbace5ab..f0fb81dfd17b 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@ struct wcn36xx_vif {
enum wcn36xx_power_state pw_state;
u8 bss_index;
- u8 ucast_dpu_signature;
/* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
u8 self_sta_index;
u8 self_dpu_desc_index;
+ u8 self_ucast_dpu_sign;
};
/**
@@ -159,6 +159,7 @@ struct wcn36xx_sta {
u16 tid;
u8 sta_index;
u8 dpu_desc_index;
+ u8 ucast_dpu_sign;
u8 bss_sta_index;
u8 bss_dpu_desc_index;
bool is_data_encrypted;
@@ -171,10 +172,14 @@ struct wcn36xx {
struct device *dev;
struct list_head vif_list;
+ const struct firmware *nv;
+
u8 fw_revision;
u8 fw_version;
u8 fw_minor;
u8 fw_major;
+ u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
+ u32 chip_version;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -222,6 +227,9 @@ struct wcn36xx {
};
+#define WCN36XX_CHIP_3660 0
+#define WCN36XX_CHIP_3680 1
+
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eeceab39cda2..e1c8cc4a4b92 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -41,30 +41,28 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
switch (use_msi) {
case 3:
case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+ break;
case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
break;
default:
- wil_err(wil, "Invalid use_msi=%d, default to 1\n",
- use_msi);
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
use_msi = 1;
}
- wil->n_msi = use_msi;
- if (wil->n_msi) {
- wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- if (rc && (wil->n_msi == 3)) {
- wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
- wil->n_msi = 1;
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- }
- if (rc) {
- wil_err(wil, "pci_enable_msi failed, use INTx\n");
- wil->n_msi = 0;
- }
- } else {
- wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+
+ if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ use_msi = 1;
+ }
+
+ if (use_msi == 1 && pci_enable_msi(pdev)) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ use_msi = 0;
}
+ wil->n_msi = use_msi;
+
rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
goto stop_master;
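
The rewritten wil_if_pcie_enable() flow above validates the use_msi module
parameter, tries the 3-vector mode with pci_enable_msi_range(), falls back to
a single MSI via pci_enable_msi(), and finally to INTx. A condensed sketch of
that fallback ladder (3.14-era PCI MSI API as used in the hunk; not the driver
function itself):

#include <linux/pci.h>

static int demo_setup_irqs(struct pci_dev *pdev, int use_msi)
{
    if (use_msi != 0 && use_msi != 1 && use_msi != 3)
        use_msi = 1;                    /* invalid value -> default to 1 */

    if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0)
        use_msi = 1;                    /* 3 MSI failed, try 1 MSI */

    if (use_msi == 1 && pci_enable_msi(pdev))
        use_msi = 0;                    /* MSI failed, fall back to INTx */

    return use_msi;    /* number of MSI vectors enabled (0 = INTx) */
}
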
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index abac25ee958d..f476fc337d64 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -58,41 +58,6 @@ enum b43_verbosity {
#endif
};
-
-/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
-static inline u8 b43_freq_to_channel_5ghz(int freq)
-{
- return ((freq - 5000) / 5);
-}
-static inline u8 b43_freq_to_channel_2ghz(int freq)
-{
- u8 channel;
-
- if (freq == 2484)
- channel = 14;
- else
- channel = (freq - 2407) / 5;
-
- return channel;
-}
-
-/* Lightweight function to convert a channel number to a frequency (in Mhz). */
-static inline int b43_channel_to_freq_5ghz(u8 channel)
-{
- return (5000 + (5 * channel));
-}
-static inline int b43_channel_to_freq_2ghz(u8 channel)
-{
- int freq;
-
- if (channel == 14)
- freq = 2484;
- else
- freq = 2407 + (5 * channel);
-
- return freq;
-}
-
static inline int b43_is_cck_rate(int rate)
{
return (rate == B43_CCK_RATE_1MB ||
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 50e5ddb12fb3..218a0f37af46 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -806,7 +806,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
B43_WARN_ON(1);
/* FIXME: We don't really know which value the "chanid" contains.
* So the following assignment might be wrong. */
- status.freq = b43_channel_to_freq_5ghz(chanid);
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
case B43_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
@@ -819,13 +820,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
case B43_PHYTYPE_HT:
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
- if (chanstat & B43_RX_CHAN_5GHZ) {
+ if (chanstat & B43_RX_CHAN_5GHZ)
status.band = IEEE80211_BAND_5GHZ;
- status.freq = b43_channel_to_freq_5ghz(chanid);
- } else {
+ else
status.band = IEEE80211_BAND_2GHZ;
- status.freq = b43_channel_to_freq_2ghz(chanid);
- }
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
default:
B43_WARN_ON(1);
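
Both b43 hunks drop the hand-rolled channel-to-frequency helpers in favour of
mac80211's ieee80211_channel_to_frequency(), which encodes the same arithmetic:
2407 + 5 * ch on 2.4 GHz (with channel 14 at 2484 MHz) and 5000 + 5 * ch on
5 GHz. A standalone illustration of those formulas (not driver code):

#include <stdio.h>

static int chan_to_freq_2ghz(int ch) { return ch == 14 ? 2484 : 2407 + 5 * ch; }
static int chan_to_freq_5ghz(int ch) { return 5000 + 5 * ch; }

int main(void)
{
    printf("2.4 GHz ch 1  -> %d MHz\n", chan_to_freq_2ghz(1));   /* 2412 */
    printf("2.4 GHz ch 14 -> %d MHz\n", chan_to_freq_2ghz(14));  /* 2484 */
    printf("5 GHz   ch 36 -> %d MHz\n", chan_to_freq_5ghz(36));  /* 5180 */
    return 0;
}
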
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 57cddee03252..1d2ceac3a221 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
+ chip.o \
fwil.o \
fweh.o \
fwsignal.o \
@@ -36,8 +37,7 @@ brcmfmac-objs += \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
- bcmsdh.o \
- sdio_chip.o
+ bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index fa35b23bbaa7..07e7d2520257 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -43,7 +43,6 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
-#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@@ -827,7 +826,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
}
if (!write)
memcpy(data, pkt->data, dsize);
- skb_trim(pkt, dsize);
+ skb_trim(pkt, 0);
/* Adjust for next transfer (if any) */
size -= dsize;
@@ -1115,11 +1114,12 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.remove = brcmf_ops_sdio_remove,
.name = BRCMFMAC_SDIO_PDATA_NAME,
.id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
.drv = {
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
.pm = &brcmf_sdio_pm_ops,
- },
#endif /* CONFIG_PM_SLEEP */
+ },
};
static int brcmf_sdio_pd_probe(struct platform_device *pdev)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
new file mode 100644
index 000000000000..a07b95ef9e70
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+
+#include <defs.h>
+#include <soc.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <chipcommon.h>
+#include "dhd_dbg.h"
+#include "chip.h"
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* PL-368 DMP definitions */
+#define DMP_DESC_TYPE_MSK 0x0000000F
+#define DMP_DESC_EMPTY 0x00000000
+#define DMP_DESC_VALID 0x00000001
+#define DMP_DESC_COMPONENT 0x00000001
+#define DMP_DESC_MASTER_PORT 0x00000003
+#define DMP_DESC_ADDRESS 0x00000005
+#define DMP_DESC_ADDRSIZE_GT32 0x00000008
+#define DMP_DESC_EOT 0x0000000F
+
+#define DMP_COMP_DESIGNER 0xFFF00000
+#define DMP_COMP_DESIGNER_S 20
+#define DMP_COMP_PARTNUM 0x000FFF00
+#define DMP_COMP_PARTNUM_S 8
+#define DMP_COMP_CLASS 0x000000F0
+#define DMP_COMP_CLASS_S 4
+#define DMP_COMP_REVISION 0xFF000000
+#define DMP_COMP_REVISION_S 24
+#define DMP_COMP_NUM_SWRAP 0x00F80000
+#define DMP_COMP_NUM_SWRAP_S 19
+#define DMP_COMP_NUM_MWRAP 0x0007C000
+#define DMP_COMP_NUM_MWRAP_S 14
+#define DMP_COMP_NUM_SPORT 0x00003E00
+#define DMP_COMP_NUM_SPORT_S 9
+#define DMP_COMP_NUM_MPORT 0x000001F0
+#define DMP_COMP_NUM_MPORT_S 4
+
+#define DMP_MASTER_PORT_UID 0x0000FF00
+#define DMP_MASTER_PORT_UID_S 8
+#define DMP_MASTER_PORT_NUM 0x000000F0
+#define DMP_MASTER_PORT_NUM_S 4
+
+#define DMP_SLAVE_ADDR_BASE 0xFFFFF000
+#define DMP_SLAVE_ADDR_BASE_S 12
+#define DMP_SLAVE_PORT_NUM 0x00000F00
+#define DMP_SLAVE_PORT_NUM_S 8
+#define DMP_SLAVE_TYPE 0x000000C0
+#define DMP_SLAVE_TYPE_S 6
+#define DMP_SLAVE_TYPE_SLAVE 0
+#define DMP_SLAVE_TYPE_BRIDGE 1
+#define DMP_SLAVE_TYPE_SWRAP 2
+#define DMP_SLAVE_TYPE_MWRAP 3
+#define DMP_SLAVE_SIZE_TYPE 0x00000030
+#define DMP_SLAVE_SIZE_TYPE_S 4
+#define DMP_SLAVE_SIZE_4K 0
+#define DMP_SLAVE_SIZE_8K 1
+#define DMP_SLAVE_SIZE_16K 2
+#define DMP_SLAVE_SIZE_DESC 3
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE 0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE 0x18002000
+#define BCM4329_RAMSIZE 0x48000
+
+/* bcm43143 */
+/* SDIO device core */
+#define BCM43143_CORE_BUS_BASE 0x18002000
+/* internal memory core */
+#define BCM43143_CORE_SOCRAM_BASE 0x18004000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM43143_CORE_ARM_BASE 0x18003000
+#define BCM43143_RAMSIZE 0x70000
+
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+ ((sbidh) & SSB_IDHIGH_RCLO))
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+struct brcmf_core_priv {
+ struct brcmf_core pub;
+ u32 wrapbase;
+ struct list_head list;
+ struct brcmf_chip_priv *chip;
+};
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
+struct brcmf_chip_priv {
+ struct brcmf_chip pub;
+ const struct brcmf_buscore_ops *ops;
+ void *ctx;
+ /* assured first core is chipcommon, second core is buscore */
+ struct list_head cores;
+ u16 num_cores;
+
+ bool (*iscoreup)(struct brcmf_core_priv *core);
+ void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset);
+ void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
+ u32 postreset);
+};
+
+static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
+ struct brcmf_core *core)
+{
+ u32 regdata;
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
+ core->rev = SBCOREREV(regdata);
+}
+
+static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 address;
+
+ ci = core->chip;
+ address = CORE_SB(core->pub.base, sbtmstatelow);
+ regdata = ci->ops->read32(ci->ctx, address);
+ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+ return SSB_TMSLOW_CLOCK == regdata;
+}
+
+static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ bool ret;
+
+ ci = core->chip;
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+ return ret;
+}
+
+static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 val, base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if (val & SSB_TMSLOW_RESET)
+ return;
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if ((val & SSB_TMSLOW_CLOCK) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ val | SSB_TMSLOW_REJECT);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
+ & SSB_TMSHIGH_BUSY), 100000);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (val & SSB_TMSHIGH_BUSY)
+ brcmf_err("core state still busy\n");
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val |= SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate)) &
+ SSB_IMSTATE_BUSY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val &= ~SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+
+ ci = core->chip;
+
+ /* if core is already in reset, just return */
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+ return;
+
+ /* configure reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+ /* put in reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET);
+ usleep_range(10, 20);
+
+ /* wait till reset is 1 */
+ SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
+ BCMA_RESET_CTL_RESET, 300);
+
+ /* in-reset configure */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_chip_sb_coredisable(core, 0, 0);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_RESET);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* clear any serror */
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (regdata & SSB_TMSHIGH_SERR)
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
+ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
+ regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* leave clock enabled */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ int count;
+
+ ci = core->chip;
+
+ /* must disable first to work for arbitrary current core state */
+ brcmf_chip_ai_coredisable(core, prereset, reset);
+
+ count = 0;
+ while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
+ BCMA_RESET_CTL_RESET) {
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
+ count++;
+ if (count > 50)
+ break;
+ usleep_range(40, 60);
+ }
+
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ postreset | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static char *brcmf_chip_name(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
+ u16 coreid, u32 base,
+ u32 wrapbase)
+{
+ struct brcmf_core_priv *core;
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return ERR_PTR(-ENOMEM);
+
+ core->pub.id = coreid;
+ core->pub.base = base;
+ core->chip = ci;
+ core->wrapbase = wrapbase;
+
+ list_add_tail(&core->list, &ci->cores);
+ return &core->pub;
+}
+
+#ifdef DEBUG
+/* safety check for chipinfo */
+static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core_priv *core;
+ bool need_socram = false;
+ bool has_socram = false;
+ int idx = 1;
+
+ list_for_each_entry(core, &ci->cores, list) {
+ brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
+ idx++, core->pub.id, core->pub.rev, core->pub.base,
+ core->wrapbase);
+
+ switch (core->pub.id) {
+ case BCMA_CORE_ARM_CM3:
+ need_socram = true;
+ break;
+ case BCMA_CORE_INTERNAL_MEM:
+ has_socram = true;
+ break;
+ case BCMA_CORE_ARM_CR4:
+ if (ci->pub.rambase == 0) {
+ brcmf_err("RAM base not provided with ARM CR4 core\n");
+ return -ENOMEM;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* check RAM core presence for ARM CM3 core */
+ if (need_socram && !has_socram) {
+ brcmf_err("RAM core not provided with ARM CM3 core\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+#else /* DEBUG */
+static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ return 0;
+}
+#endif
+
+static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+ switch (ci->pub.chip) {
+ case BCM4329_CHIP_ID:
+ ci->pub.ramsize = BCM4329_RAMSIZE;
+ break;
+ case BCM43143_CHIP_ID:
+ ci->pub.ramsize = BCM43143_RAMSIZE;
+ break;
+ case BCM43241_CHIP_ID:
+ ci->pub.ramsize = 0x90000;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->pub.ramsize = 0x48000;
+ break;
+ case BCM4334_CHIP_ID:
+ ci->pub.ramsize = 0x80000;
+ break;
+ case BCM4335_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ case BCM43362_CHIP_ID:
+ ci->pub.ramsize = 0x3c000;
+ break;
+ case BCM4339_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ default:
+ brcmf_err("unknown chip: %s\n", ci->pub.name);
+ break;
+ }
+}
+
+static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u8 *type)
+{
+ u32 val;
+
+ /* read next descriptor */
+ val = ci->ops->read32(ci->ctx, *eromaddr);
+ *eromaddr += 4;
+
+ if (!type)
+ return val;
+
+ /* determine descriptor type */
+ *type = (val & DMP_DESC_TYPE_MSK);
+ if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
+ *type = DMP_DESC_ADDRESS;
+
+ return val;
+}
+
+static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u32 *regbase, u32 *wrapbase)
+{
+ u8 desc;
+ u32 val;
+ u8 mpnum = 0;
+ u8 stype, sztype, wraptype;
+
+ *regbase = 0;
+ *wrapbase = 0;
+
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ if (desc == DMP_DESC_MASTER_PORT) {
+ mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
+ wraptype = DMP_SLAVE_TYPE_MWRAP;
+ } else if (desc == DMP_DESC_ADDRESS) {
+ /* revert erom address */
+ *eromaddr -= 4;
+ wraptype = DMP_SLAVE_TYPE_SWRAP;
+ } else {
+ *eromaddr -= 4;
+ return -EILSEQ;
+ }
+
+ do {
+ /* locate address descriptor */
+ do {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ /* unexpected table end */
+ if (desc == DMP_DESC_EOT) {
+ *eromaddr -= 4;
+ return -EFAULT;
+ }
+ } while (desc != DMP_DESC_ADDRESS);
+
+ /* skip upper 32-bit address descriptor */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+
+ sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
+
+ /* next size descriptor can be skipped */
+ if (sztype == DMP_SLAVE_SIZE_DESC) {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ /* skip upper size descriptor if present */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ }
+
+ /* only look for 4K register regions */
+ if (sztype != DMP_SLAVE_SIZE_4K)
+ continue;
+
+ stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
+
+ /* only regular slave and wrapper */
+ if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
+ *regbase = val & DMP_SLAVE_ADDR_BASE;
+ if (*wrapbase == 0 && stype == wraptype)
+ *wrapbase = val & DMP_SLAVE_ADDR_BASE;
+ } while (*regbase == 0 || *wrapbase == 0);
+
+ return 0;
+}
+
+static
+int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 eromaddr;
+ u8 desc_type = 0;
+ u32 val;
+ u16 id;
+ u8 nmp, nsp, nmw, nsw, rev;
+ u32 base, wrap;
+ int err;
+
+ eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
+
+ while (desc_type != DMP_DESC_EOT) {
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (!(val & DMP_DESC_VALID))
+ continue;
+
+ if (desc_type == DMP_DESC_EMPTY)
+ continue;
+
+ /* need a component descriptor */
+ if (desc_type != DMP_DESC_COMPONENT)
+ continue;
+
+ id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
+
+ /* next descriptor must be component as well */
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
+ return -EFAULT;
+
+ /* only look at cores with master port(s) */
+ nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
+ nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
+ nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
+ nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
+ rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
+
+ /* need core with ports */
+ if (nmw + nsw == 0)
+ continue;
+
+ /* try to obtain register address info */
+ err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
+ if (err)
+ continue;
+
+ /* finally a core to be added */
+ core = brcmf_chip_add_core(ci, id, base, wrap);
+ if (IS_ERR(core))
+ return PTR_ERR(core);
+
+ core->rev = rev;
+ }
+
+ return 0;
+}
+
+static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 regdata;
+ u32 socitype;
+
+ /* Get CC core rev
+ * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
+ ci->pub.chip = regdata & CID_ID_MASK;
+ ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+ brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
+ brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
+ socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
+ ci->pub.chiprev);
+
+ if (socitype == SOCI_SB) {
+ if (ci->pub.chip != BCM4329_CHIP_ID) {
+ brcmf_err("SB chip is not supported\n");
+ return -ENODEV;
+ }
+ ci->iscoreup = brcmf_chip_sb_iscoreup;
+ ci->coredisable = brcmf_chip_sb_coredisable;
+ ci->resetcore = brcmf_chip_sb_resetcore;
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
+ SI_ENUM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
+ BCM4329_CORE_BUS_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
+ BCM4329_CORE_SOCRAM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
+ BCM4329_CORE_ARM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ } else if (socitype == SOCI_AI) {
+ ci->iscoreup = brcmf_chip_ai_iscoreup;
+ ci->coredisable = brcmf_chip_ai_coredisable;
+ ci->resetcore = brcmf_chip_ai_resetcore;
+
+ brcmf_chip_dmp_erom_scan(ci);
+ } else {
+ brcmf_err("chip backplane type %u is not supported\n",
+ socitype);
+ return -ENODEV;
+ }
+
+ brcmf_chip_get_raminfo(ci);
+
+ return brcmf_chip_cores_check(ci);
+}
+
+static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
+{
+ struct brcmf_core *core;
+ struct brcmf_core_priv *cr4;
+ u32 val;
+
+
+ core = brcmf_chip_get_core(&chip->pub, id);
+ if (!core)
+ return;
+
+ switch (id) {
+ case BCMA_CORE_ARM_CM3:
+ brcmf_chip_coredisable(core, 0, 0);
+ break;
+ case BCMA_CORE_ARM_CR4:
+ cr4 = container_of(core, struct brcmf_core_priv, pub);
+
+ /* clear all IOCTL bits except HALT bit */
+ val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
+ val &= ARMCR4_BCMA_IOCTL_CPUHALT;
+ brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
+ ARMCR4_BCMA_IOCTL_CPUHALT);
+ break;
+ default:
+ brcmf_err("unknown id: %u\n", id);
+ break;
+ }
+}
+
+static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_chip *pub;
+ struct brcmf_core_priv *cc;
+ u32 base;
+ u32 val;
+ int ret = 0;
+
+ pub = &chip->pub;
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ base = cc->pub.base;
+
+ /* get chipcommon capabilities */
+ pub->cc_caps = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, capabilities));
+
+ /* get pmu caps & rev */
+ if (pub->cc_caps & CC_CAP_PMU) {
+ val = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, pmucapabilities));
+ pub->pmurev = val & PCAP_REV_MASK;
+ pub->pmucaps = val;
+ }
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
+ cc->pub.rev, pub->pmurev, pub->pmucaps);
+
+ /* execute bus core specific setup */
+ if (chip->ops->setup)
+ ret = chip->ops->setup(chip->ctx, pub);
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong),
+ * or downloaded code was already running.
+ */
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+ return ret;
+}
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops)
+{
+ struct brcmf_chip_priv *chip;
+ int err = 0;
+
+ if (WARN_ON(!ops->read32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->write32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->prepare))
+ err = -EINVAL;
+ if (WARN_ON(!ops->exit_dl))
+ err = -EINVAL;
+ if (err < 0)
+ return ERR_PTR(-EINVAL);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&chip->cores);
+ chip->num_cores = 0;
+ chip->ops = ops;
+ chip->ctx = ctx;
+
+ err = ops->prepare(ctx);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_recognition(chip);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_setup(chip);
+ if (err < 0)
+ goto fail;
+
+ return &chip->pub;
+
+fail:
+ brcmf_chip_detach(&chip->pub);
+ return ERR_PTR(err);
+}
+
+void brcmf_chip_detach(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+ struct brcmf_core_priv *tmp;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry_safe(core, tmp, &chip->cores, list) {
+ list_del(&core->list);
+ kfree(core);
+ }
+ kfree(chip);
+}
+
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry(core, &chip->cores, list)
+ if (core->pub.id == coreid)
+ return &core->pub;
+
+ return NULL;
+}
+
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *cc;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
+ return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
+ return &cc->pub;
+}
+
+bool brcmf_chip_iscoreup(struct brcmf_core *pub)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ return core->chip->iscoreup(core);
+}
+
+void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->coredisable(core, prereset, reset);
+}
+
+void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
+ u32 postreset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->resetcore(core, prereset, reset, postreset);
+}
+
+static void
+brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+}
+
+static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ if (!brcmf_chip_iscoreup(core)) {
+ brcmf_err("SOCRAM core is down after reset?\n");
+ return false;
+ }
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+
+ return true;
+}
+
+static inline void
+brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+ struct brcmf_core *core;
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+
+ /* restore ARM */
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
+ brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+ return true;
+}
+
+void brcmf_chip_enter_download(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm) {
+ brcmf_chip_cr4_enterdl(chip);
+ return;
+ }
+
+ brcmf_chip_cm3_enterdl(chip);
+}
+
+bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm)
+ return brcmf_chip_cr4_exitdl(chip, rstvec);
+
+ return brcmf_chip_cm3_exitdl(chip);
+}
+
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
+{
+ u32 base, addr, reg, pmu_cc3_mask = ~0;
+ struct brcmf_chip_priv *chip;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* old chips with PMU version less than 17 don't support save restore */
+ if (pub->pmurev < 17)
+ return false;
+
+ base = brcmf_chip_get_chipcommon(pub)->base;
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+
+ switch (pub->chip) {
+ case BCM43241_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ case BCM4339_CHIP_ID:
+ /* read PMU chipcontrol register 3 */
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ chip->ops->write32(chip->ctx, addr, 3);
+ addr = CORE_CC_REG(base, chipcontrol_data);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & pmu_cc3_mask) != 0;
+ default:
+ addr = CORE_CC_REG(base, pmucapabilities_ext);
+ reg = chip->ops->read32(chip->ctx, addr);
+ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+ return false;
+
+ addr = CORE_CC_REG(base, retention_ctl);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+ }
+}
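
brcmf_chip_dmp_erom_scan() above walks the EROM one 32-bit descriptor at a time
and pulls fields out with the DMP_* mask/shift pairs defined at the top of the
file. A userspace illustration of that extraction on a made-up component
descriptor (0x800 is the BCMA chipcommon core id):

#include <stdio.h>

#define DEMO_DESC_TYPE_MSK   0x0000000F    /* same layout as DMP_DESC_TYPE_MSK */
#define DEMO_COMP_PARTNUM    0x000FFF00    /* same layout as DMP_COMP_PARTNUM */
#define DEMO_COMP_PARTNUM_S  8

int main(void)
{
    unsigned int val = 0x40080001;    /* made-up component descriptor word */
    unsigned int type = val & DEMO_DESC_TYPE_MSK;
    unsigned int part = (val & DEMO_COMP_PARTNUM) >> DEMO_COMP_PARTNUM_S;

    /* type 0x1 = component descriptor, part 0x800 = chipcommon core */
    printf("type 0x%x, core id 0x%03x\n", type, part);
    return 0;
}
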
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
new file mode 100644
index 000000000000..c32908da90c8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMF_CHIP_H
+#define BRCMF_CHIP_H
+
+#include <linux/types.h>
+
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+
+/**
+ * struct brcmf_chip - chip level information.
+ *
+ * @chip: chip identifier.
+ * @chiprev: chip revision.
+ * @cc_caps: chipcommon core capabilities.
+ * @pmucaps: PMU capabilities.
+ * @pmurev: PMU revision.
+ * @rambase: RAM base address (only applicable for ARM CR4 chips).
+ * @ramsize: amount of RAM on chip.
+ * @name: string representation of the chip identifier.
+ */
+struct brcmf_chip {
+ u32 chip;
+ u32 chiprev;
+ u32 cc_caps;
+ u32 pmucaps;
+ u32 pmurev;
+ u32 rambase;
+ u32 ramsize;
+ char name[8];
+};
+
+/**
+ * struct brcmf_core - core related information.
+ *
+ * @id: core identifier.
+ * @rev: core revision.
+ * @base: base address of core register space.
+ */
+struct brcmf_core {
+ u16 id;
+ u16 rev;
+ u32 base;
+};
+
+/**
+ * struct brcmf_buscore_ops - buscore specific callbacks.
+ *
+ * @read32: read 32-bit value over bus.
+ * @write32: write 32-bit value over bus.
+ * @prepare: prepare bus for core configuration.
+ * @setup: bus-specific core setup.
+ * @exit_dl: exit download state.
+ * The callback should use the provided @rstvec when non-zero.
+ */
+struct brcmf_buscore_ops {
+ u32 (*read32)(void *ctx, u32 addr);
+ void (*write32)(void *ctx, u32 addr, u32 value);
+ int (*prepare)(void *ctx);
+ int (*setup)(void *ctx, struct brcmf_chip *chip);
+ void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+};
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops);
+void brcmf_chip_detach(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+bool brcmf_chip_iscoreup(struct brcmf_core *core);
+void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
+void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
+ u32 postreset);
+void brcmf_chip_enter_download(struct brcmf_chip *ci);
+bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
+
+#endif /* BRCMF_AXIDMP_H */
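
brcmf_chip_attach() requires the read32, write32, prepare and exit_dl callbacks
and treats setup as optional. A skeleton of what a bus glue layer might supply
(placeholder bodies only; the real SDIO implementation lives in dhd_sdio.c
below):

#include "chip.h"

struct demo_bus {
    /* bus handle, register window, ... */
};

static u32 demo_read32(void *ctx, u32 addr)
{
    /* struct demo_bus *bus = ctx; read a backplane register over the bus */
    return 0;    /* placeholder */
}

static void demo_write32(void *ctx, u32 addr, u32 value) { }
static int demo_prepare(void *ctx) { return 0; }
static void demo_exit_dl(void *ctx, struct brcmf_chip *chip, u32 rstvec) { }

static const struct brcmf_buscore_ops demo_buscore_ops = {
    .read32 = demo_read32,
    .write32 = demo_write32,
    .prepare = demo_prepare,
    .exit_dl = demo_exit_dl,    /* .setup is optional */
};

/* attach: chip = brcmf_chip_attach(bus, &demo_buscore_ops); */
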
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3e991897d7ca..631d5dc5b6d5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
@@ -40,7 +41,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
-#include "sdio_chip.h"
+#include "chip.h"
#include "nvram.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -156,6 +157,33 @@ struct rte_console {
/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
/* intstatus */
#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
@@ -494,6 +522,52 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+ {3, 0x3},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+ {16, 0x7},
+ {12, 0x5},
+ {8, 0x3},
+ {4, 0x1}
+};
+
#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
@@ -619,27 +693,24 @@ static bool data_ok(struct brcmf_sdio *bus)
* Reads a register in the SDIO hardware block. This block occupies a series of
* addresses on the 32-bit backplane bus.
*/
-static int
-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- *regvar = brcmf_sdiod_regrl(bus->sdiodev,
- bus->ci->c_inf[idx].base + offset, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
return ret;
}
-static int
-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- brcmf_sdiod_regwl(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- regval, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
return ret;
}
@@ -900,8 +971,8 @@ static int
brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(SDIO, "request %s currently %s\n",
+
+ brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
(bus->sleeping ? "SLEEP" : "WAKE"));
@@ -953,6 +1024,86 @@ end:
}
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
+
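
The validity test leans on how the firmware download leaves RAM: the last word of socram holds the NVRAM length token (whose top half is the bitwise complement of its bottom half) until the running firmware overwrites it with the sdpcm_shared pointer. So zero, and any value that still matches the complement pattern, is rejected. A small sketch of the predicate (the sample values are made up):

#include <stdbool.h>
#include <stdio.h>

/* same test as brcmf_sdio_valid_shared_address() above */
static bool valid_shared_address(unsigned int addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}

int main(void)
{
	/* still looks like the NVRAM token: low half 0x0001, high half ~0x0001 */
	printf("0xfffe0001 valid: %d\n", valid_shared_address(0xfffe0001));	/* 0 */
	/* plausible pointer written by a booted firmware */
	printf("0x0023dc3c valid: %d\n", valid_shared_address(0x0023dc3c));	/* 1 */
	return 0;
}
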
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh)
+{
+ u32 addr;
+ int rv;
+ u32 shaddr = 0;
+ struct sdpcm_shared_le sh_le;
+ __le32 addr_le;
+
+ shaddr = bus->ci->rambase + bus->ramsize - 4;
+
+ /*
+ * Read last word in socram to determine
+ * address of sdpcm_shared structure
+ */
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+ sdio_release_host(bus->sdiodev->func[1]);
+ if (rv < 0)
+ return rv;
+
+ addr = le32_to_cpu(addr_le);
+
+ brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (!brcmf_sdio_valid_shared_address(addr)) {
+ brcmf_err("invalid sdpcm_shared address 0x%08X\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /* Read hndrte_shared structure */
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
+ if (rv < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = le32_to_cpu(sh_le.flags);
+ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+ sh->assert_line = le32_to_cpu(sh_le.assert_line);
+ sh->console_addr = le32_to_cpu(sh_le.console_addr);
+ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+ brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+ SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+ struct sdpcm_shared sh;
+
+ if (brcmf_sdio_readshared(bus, &sh) == 0)
+ bus->console_addr = sh.console_addr;
+}
+#else
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
@@ -996,6 +1147,12 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
else
brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
bus->sdpcm_ver);
+
+ /*
+ * Retrieve console state address now that firmware should have
+ * updated it.
+ */
+ brcmf_sdio_get_console_addr(bus);
}
/*
@@ -2293,14 +2450,13 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
- u8 idx;
+ struct brcmf_core *buscore;
u32 addr;
unsigned long val;
int n, ret;
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- addr = bus->ci->c_inf[idx].base +
- offsetof(struct sdpcmd_regs, intstatus);
+ buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
@@ -2810,72 +2966,6 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
}
#ifdef DEBUG
-static inline bool brcmf_sdio_valid_shared_address(u32 addr)
-{
- return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
-}
-
-static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
- struct sdpcm_shared *sh)
-{
- u32 addr;
- int rv;
- u32 shaddr = 0;
- struct sdpcm_shared_le sh_le;
- __le32 addr_le;
-
- shaddr = bus->ci->rambase + bus->ramsize - 4;
-
- /*
- * Read last word in socram to determine
- * address of sdpcm_shared structure
- */
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
- sdio_release_host(bus->sdiodev->func[1]);
- if (rv < 0)
- return rv;
-
- addr = le32_to_cpu(addr_le);
-
- brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
-
- /*
- * Check if addr is valid.
- * NVRAM length at the end of memory should have been overwritten.
- */
- if (!brcmf_sdio_valid_shared_address(addr)) {
- brcmf_err("invalid sdpcm_shared address 0x%08X\n",
- addr);
- return -EINVAL;
- }
-
- /* Read hndrte_shared structure */
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
- sizeof(struct sdpcm_shared_le));
- if (rv < 0)
- return rv;
-
- /* Endianness */
- sh->flags = le32_to_cpu(sh_le.flags);
- sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
- sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
- sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
- sh->assert_line = le32_to_cpu(sh_le.assert_line);
- sh->console_addr = le32_to_cpu(sh_le.console_addr);
- sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
-
- if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
- brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
- SDPCM_SHARED_VERSION,
- sh->flags & SDPCM_SHARED_VERSION_MASK);
- return -EPROTO;
- }
-
- return 0;
-}
-
static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
struct sdpcm_shared *sh, char __user *data,
size_t count)
@@ -3105,6 +3195,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
debugfs_create_file("forensics", S_IRUGO, dentry, bus,
&brcmf_sdio_forensic_ops);
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+ debugfs_create_u32("console_interval", 0644, dentry,
+ &bus->console_interval);
}
#else
static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@@ -3223,32 +3315,17 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
const struct firmware *fw)
{
int err;
- int offset;
- int address;
- int len;
brcmf_dbg(TRACE, "Enter\n");
- err = 0;
- offset = 0;
- address = bus->ci->rambase;
- while (offset < fw->size) {
- len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
- fw->size - offset;
- err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
- (u8 *)&fw->data[offset], len);
- if (err) {
- brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- err, len, address);
- return err;
- }
- offset += len;
- address += len;
- }
- if (!err)
- if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
- (u8 *)fw->data, fw->size))
- err = -EIO;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
+ (u8 *)fw->data, fw->size);
+ if (err)
+ brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+ err, (int)fw->size, bus->ci->rambase);
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+ (u8 *)fw->data, fw->size))
+ err = -EIO;
return err;
}
@@ -3291,7 +3368,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
- brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+ brcmf_chip_enter_download(bus->ci);
fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
if (fw == NULL) {
@@ -3323,7 +3400,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
}
/* Take arm out of reset */
- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
+ if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
@@ -3338,40 +3415,6 @@ err:
return bcmerror;
}
-static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
-{
- u32 addr, reg, pmu_cc3_mask = ~0;
- int err;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* old chips with PMU version less than 17 don't support save restore */
- if (bus->ci->pmurev < 17)
- return false;
-
- switch (bus->ci->chip) {
- case BCM43241_CHIP_ID:
- case BCM4335_CHIP_ID:
- case BCM4339_CHIP_ID:
- /* read PMU chipcontrol register 3 */
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
- brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & pmu_cc3_mask) != 0;
- default:
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
- if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
- return false;
-
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
- PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
- }
-}
-
static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
@@ -3423,7 +3466,7 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
/* KSO bit added in SDIO core rev 12 */
- if (bus->ci->c_inf[1].rev < 12)
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
return 0;
val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
@@ -3454,15 +3497,13 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
uint pad_size;
u32 value;
- u8 idx;
int err;
/* the commands below use the terms tx and rx from
* a device perspective, ie. bus:txglom affects the
* bus transfers from device to host.
*/
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- if (bus->ci->c_inf[idx].rev < 12) {
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3573,7 +3614,7 @@ static int brcmf_sdio_bus_init(struct device *dev)
ret = -ENODEV;
}
- if (brcmf_sdio_sr_capable(bus)) {
+ if (brcmf_chip_sr_capable(bus->ci)) {
brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
@@ -3722,6 +3763,170 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
}
}
+static void
+brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci, u32 drivestrength)
+{
+ const struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask;
+ u32 str_shift;
+ u32 base;
+ u32 i;
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ u32 addr;
+
+ if (!(ci->cc_caps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = sdiod_drvstr_tab1_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+ str_tab = sdiod_drvstr_tab6_1v8;
+ str_mask = 0x00001800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+ /* note: 43143 does not support tristate */
+ i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+ if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+ str_tab = sdiod_drvstr_tab2_3v3;
+ str_mask = 0x00000007;
+ str_shift = 0;
+ } else
+ brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+ ci->name, drivestrength);
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ ci->name, ci->chiprev, ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+ base = brcmf_chip_get_chipcommon(ci)->base;
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+
+ brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+ str_tab[i].strength, drivestrength, cc_data_temp);
+ }
+}
+
+static int brcmf_sdio_buscoreprep(void *ctx)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ int err = 0;
+ u8 clkval, clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_err("error writing for HT off\n");
+ return err;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+ brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ return -EACCES;
+ }
+
+ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ return -EBUSY;
+ }
+
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ udelay(65);
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ return 0;
+}
+
+static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
+ u32 rstvec)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ struct brcmf_core *core;
+ u32 reg_addr;
+
+ /* clear all interrupts */
+ core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
+ reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+
+ if (rstvec)
+ /* Write reset vector to address 0 */
+ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+ sizeof(rstvec));
+}
+
+static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ u32 val, rev;
+
+ val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+ rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (rev >= 2) {
+ val &= ~CID_ID_MASK;
+ val |= BCM4339_CHIP_ID;
+ }
+ }
+ return val;
+}
+
+static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+
+ brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+}
+
+static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
+ .prepare = brcmf_sdio_buscoreprep,
+ .exit_dl = brcmf_sdio_buscore_exitdl,
+ .read32 = brcmf_sdio_buscore_read32,
+ .write32 = brcmf_sdio_buscore_write32,
+};
+
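
This ops table is the seam between the SDIO bus code and the new common chip layer: brcmf_chip_attach() is handed a context pointer plus these callbacks and performs all backplane accesses through them, which is what allows the same chip code to serve other buses. A hypothetical caller-side sketch of that indirection (only the read32/write32 shape mirrors the structure above; every other name here is invented for illustration):

#include <stdio.h>

/* minimal stand-in for the real callback contract */
struct buscore_ops {
	unsigned int (*read32)(void *ctx, unsigned int addr);
	void (*write32)(void *ctx, unsigned int addr, unsigned int val);
};

/* fake "bus" context backing the callbacks with an array instead of SDIO */
struct fake_bus {
	unsigned int regs[16];
};

static unsigned int fake_read32(void *ctx, unsigned int addr)
{
	return ((struct fake_bus *)ctx)->regs[addr / 4];
}

static void fake_write32(void *ctx, unsigned int addr, unsigned int val)
{
	((struct fake_bus *)ctx)->regs[addr / 4] = val;
}

static const struct buscore_ops fake_ops = {
	.read32 = fake_read32,
	.write32 = fake_write32,
};

int main(void)
{
	struct fake_bus bus = { .regs = { 0 } };

	/* a chip layer only ever sees ctx + ops, never the bus internals */
	fake_ops.write32(&bus, 0x8, 0x12345678);
	printf("read back 0x%x\n", fake_ops.read32(&bus, 0x8));
	return 0;
}
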
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
@@ -3737,7 +3942,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
- * Force PLL off until brcmf_sdio_chip_attach()
+ * Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
@@ -3758,8 +3963,10 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
- brcmf_err("brcmf_sdio_chip_attach failed!\n");
+ bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
+ if (IS_ERR(bus->ci)) {
+ brcmf_err("brcmf_chip_attach failed!\n");
+ bus->ci = NULL;
goto fail;
}
@@ -3772,7 +3979,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
drivestrength = bus->sdiodev->pdata->drive_strength;
else
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
- brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+ brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
/* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
@@ -3795,24 +4002,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
- reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
+ reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
pmucontrol);
- reg_val = brcmf_sdiod_regrl(bus->sdiodev,
- reg_addr,
- &err);
+ reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdiod_regwl(bus->sdiodev,
- reg_addr,
- reg_val,
- &err);
+ brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
-
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -4027,14 +4228,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
- cancel_work_sync(&bus->datawork);
- if (bus->brcmf_wq)
- destroy_workqueue(bus->brcmf_wq);
-
if (bus->sdiodev->bus_if->drvr) {
brcmf_detach(bus->sdiodev->dev);
}
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
if (bus->ci) {
if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
sdio_claim_host(bus->sdiodev->func[1]);
@@ -4045,12 +4246,11 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
* all necessary cores.
*/
msleep(20);
- brcmf_sdio_chip_enter_download(bus->sdiodev,
- bus->ci);
+ brcmf_chip_enter_download(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(bus->sdiodev->func[1]);
}
- brcmf_sdio_chip_detach(&bus->ci);
+ brcmf_chip_detach(bus->ci);
}
brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 22adbe311d20..59a5af5bf994 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -124,7 +124,8 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
-brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+brcmf_create_iovar(char *name, const char *data, u32 datalen,
+ char *buf, u32 buflen)
{
u32 len;
@@ -144,7 +145,7 @@ brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 77eae86e55c2..a30be683f4a1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -83,7 +83,7 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
u32 len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
deleted file mode 100644
index 82bf3c5d3cdc..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ /dev/null
@@ -1,972 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/* ***** SDIO interface chip backplane handle functions ***** */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/ssb/ssb_regs.h>
-#include <linux/bcma/bcma.h>
-
-#include <chipcommon.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include <soc.h>
-#include "dhd_dbg.h"
-#include "sdio_host.h"
-#include "sdio_chip.h"
-
-/* chip core base & ramsize */
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE 0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-
-/* bcm43143 */
-/* SDIO device core */
-#define BCM43143_CORE_BUS_BASE 0x18002000
-/* internal memory core */
-#define BCM43143_CORE_SOCRAM_BASE 0x18004000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM43143_CORE_ARM_BASE 0x18003000
-#define BCM43143_RAMSIZE 0x70000
-
-/* All D11 cores, ID 0x812 */
-#define BCM43xx_CORE_D11_BASE 0x18001000
-
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
- ((sbidh) & SSB_IDHIGH_RCLO))
-
-/* SOC Interconnect types (aka chip types) */
-#define SOCI_SB 0
-#define SOCI_AI 1
-
-/* EROM CompIdentB */
-#define CIB_REV_MASK 0xff000000
-#define CIB_REV_SHIFT 24
-
-/* ARM CR4 core specific control flag bits */
-#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
-
-/* D11 core specific control flag bits */
-#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
-#define D11_BCMA_IOCTL_PHYRESET 0x0008
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
- {32, 0x6},
- {26, 0x7},
- {22, 0x4},
- {16, 0x5},
- {12, 0x2},
- {8, 0x3},
- {4, 0x0},
- {0, 0x1}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
- {6, 0x7},
- {5, 0x6},
- {4, 0x5},
- {3, 0x4},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
-static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
- {3, 0x3},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0} };
-
-/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
- {16, 0x7},
- {12, 0x5},
- {8, 0x3},
- {4, 0x1}
-};
-
-u8
-brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
- if (coreid == ci->c_inf[idx].id)
- return idx;
-
- return BRCMF_MAX_CORENUM;
-}
-
-static u32
-brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh),
- NULL);
- return SBCOREREV(regdata);
-}
-
-static u32
-brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-static bool
-brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
- SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
- return (SSB_TMSLOW_CLOCK == regdata);
-}
-
-static bool
-brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
- bool ret;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
- ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
-
- return ret;
-}
-
-static void
-brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u32 regdata, base;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- base = ci->c_inf[idx].base;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if (regdata & SSB_TMSLOW_RESET)
- return;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata | SSB_TMSLOW_REJECT, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL) &
- SSB_TMSHIGH_BUSY), 100000);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_BUSY)
- brcmf_err("core state still busy\n");
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata |= SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL) &
- SSB_IMSTATE_BUSY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata &= ~SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* if core is already in reset, just return */
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
- if ((regdata & BCMA_RESET_CTL_RESET) != 0)
- return;
-
- /* configure reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-
- /* put in reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
- BCMA_RESET_CTL_RESET, NULL);
- usleep_range(10, 20);
-
- /* wait till reset is 1 */
- SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
- BCMA_RESET_CTL_RESET, 300);
-
- /* post reset configure */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-static void
-brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
- NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* clear any serror */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- 0, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- NULL);
- if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
- NULL);
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* must disable first to work for arbitrary current core state */
- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
- BCMA_RESET_CTL_RESET) {
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
- usleep_range(40, 60);
- }
-
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
- BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-#ifdef DEBUG
-/* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- u8 core_idx;
-
- /* check RAM core presence for ARM CM3 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != core_idx) {
- core_idx = brcmf_sdio_chip_getinfidx(ci,
- BCMA_CORE_INTERNAL_MEM);
- if (BRCMF_MAX_CORENUM == core_idx) {
- brcmf_err("RAM core not provided with ARM CM3 core\n");
- return -ENODEV;
- }
- }
-
- /* check RAM base for ARM CR4 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != core_idx) {
- if (ci->rambase == 0) {
- brcmf_err("RAM base not provided with ARM CR4 core\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-#else /* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- return 0;
-}
-#endif
-
-static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 regdata;
- u32 socitype;
-
- /* Get CC core rev
- * Chipid is assume to be at offset 0 from SI_ENUM_BASE
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(SI_ENUM_BASE, chipid),
- NULL);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
- ci->chiprev >= 2)
- ci->chip = BCM4339_CHIP_ID;
- socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
-
- brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
- socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
-
- if (socitype == SOCI_SB) {
- if (ci->chip != BCM4329_CHIP_ID) {
- brcmf_err("SB chip is not supported\n");
- return -ENODEV;
- }
- ci->iscoreup = brcmf_sdio_sb_iscoreup;
- ci->corerev = brcmf_sdio_sb_corerev;
- ci->coredisable = brcmf_sdio_sb_coredisable;
- ci->resetcore = brcmf_sdio_sb_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- } else if (socitype == SOCI_AI) {
- ci->iscoreup = brcmf_sdio_ai_iscoreup;
- ci->corerev = brcmf_sdio_ai_corerev;
- ci->coredisable = brcmf_sdio_ai_coredisable;
- ci->resetcore = brcmf_sdio_ai_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM43143_CHIP_ID:
- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
- ci->c_inf[0].cib = 0x2b000000;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
- ci->c_inf[1].cib = 0x18000000;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
- ci->c_inf[2].cib = 0x14000000;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->c_inf[3].cib = 0x07000000;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = BCM43143_RAMSIZE;
- break;
- case BCM43241_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2a084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0e004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x14080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x90000;
- break;
- case BCM4330_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x07004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x0d080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x48000;
- break;
- case BCM4334_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x29004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0d004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x13080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x80000;
- break;
- case BCM4335_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2b084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x0f004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x01084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- case BCM43362_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0a004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x08080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x3C000;
- break;
- case BCM4339_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2e084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x15004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x04084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- default:
- brcmf_err("AXI chip is not supported\n");
- return -ENODEV;
- }
- } else {
- brcmf_err("chip backplane type %u is not supported\n",
- socitype);
- return -ENODEV;
- }
-
- return brcmf_sdio_chip_cichk(ci);
-}
-
-static int
-brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
-{
- int err = 0;
- u8 clkval, clkset;
-
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- if (err) {
- brcmf_err("error writing for HT off\n");
- return err;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
-
- if ((clkval & ~SBSDIO_AVBITS) != clkset) {
- brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- clkset, clkval);
- return -EACCES;
- }
-
- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
- clkval);
- return -EBUSY;
- }
-
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- udelay(65);
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
- return 0;
-}
-
-static void
-brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 base = ci->c_inf[0].base;
-
- /* get chipcommon rev */
- ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
-
- /* get chipcommon capabilites */
- ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, capabilities),
- NULL);
-
- /* get pmu caps & rev */
- if (ci->c_inf[0].caps & CC_CAP_PMU) {
- ci->pmucaps =
- brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, pmucapabilities),
- NULL);
- ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
- }
-
- ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
-
- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- ci->c_inf[0].rev, ci->pmurev,
- ci->c_inf[1].rev, ci->c_inf[1].id);
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
-}
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr)
-{
- int ret;
- struct brcmf_chip *ci;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
- if (!ci)
- return -ENOMEM;
-
- ret = brcmf_sdio_chip_buscoreprep(sdiodev);
- if (ret != 0)
- goto err;
-
- ret = brcmf_sdio_chip_recognition(sdiodev, ci);
- if (ret != 0)
- goto err;
-
- brcmf_sdio_chip_buscoresetup(sdiodev, ci);
-
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
- 0, NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
- 0, NULL);
-
- *ci_ptr = ci;
- return 0;
-
-err:
- kfree(ci);
- return ret;
-}
-
-void
-brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(*ci_ptr);
- *ci_ptr = NULL;
-}
-
-static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-
-void
-brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 drivestrength)
-{
- const struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask;
- u32 str_shift;
- char chn[8];
- u32 base = ci->c_inf[0].base;
- u32 i;
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- u32 addr;
-
- if (!(ci->c_inf[0].caps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
- str_tab = sdiod_drvstr_tab1_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
- str_tab = sdiod_drvstr_tab6_1v8;
- str_mask = 0x00001800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
- /* note: 43143 does not support tristate */
- i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
- if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
- str_tab = sdiod_drvstr_tab2_3v3;
- str_mask = 0x00000007;
- str_shift = 0;
- } else
- brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- drivestrength);
- break;
- case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
- str_tab = sdiod_drive_strength_tab5_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- ci->chiprev, ci->pmurev);
- break;
- }
-
- if (str_tab != NULL) {
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
- addr = CORE_CC_REG(base, chipcontrol_addr);
- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
-
- brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
- str_tab[i].strength, drivestrength, cc_data_temp);
- }
-}
-
-static void
-brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
-}
-
-static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 core_idx;
- u32 reg_addr;
-
- if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
- brcmf_err("SOCRAM core is down after reset?\n");
- return false;
- }
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
-
- return true;
-}
-
-static inline void
-brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
- idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
-
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
- regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
- ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
-}
-
-static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 core_idx;
- u32 reg_addr;
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- /* Write reset vector to address 0 */
- brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
- sizeof(rstvec));
-
- /* restore ARM */
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
- 0, 0);
-
- return true;
-}
-
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx) {
- brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
- return;
- }
-
- brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
-}
-
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx)
- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
-
- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
deleted file mode 100644
index fb0614329ede..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMFMAC_SDIO_CHIP_H_
-#define _BRCMFMAC_SDIO_CHIP_H_
-
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
- (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
-/* SDIO function 1 register CHIPCLKCSR */
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP 0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT 0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP 0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ 0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ 0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL 0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL 0x80
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-#define SBSDIO_CLKAV(regval, alponly) \
- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
-#define BRCMF_MAX_CORENUM 6
-
-struct brcmf_core {
- u16 id;
- u16 rev;
- u32 base;
- u32 wrapbase;
- u32 caps;
- u32 cib;
-};
-
-struct brcmf_chip {
- u32 chip;
- u32 chiprev;
- /* core info */
- /* always put chipcommon core at 0, bus core at 1 */
- struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
- u32 pmurev;
- u32 pmucaps;
- u32 ramsize;
- u32 rambase;
- u32 rst_vec; /* reset vertor for ARM CR4 core */
-
- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits);
- void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits);
-};
-
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
-};
-
-/* sdio core registers */
-struct sdpcmd_regs {
- u32 corecontrol; /* 0x00, rev8 */
- u32 corestatus; /* rev8 */
- u32 PAD[1];
- u32 biststatus; /* rev8 */
-
- /* PCMCIA access */
- u16 pcmciamesportaladdr; /* 0x010, rev8 */
- u16 PAD[1];
- u16 pcmciamesportalmask; /* rev8 */
- u16 PAD[1];
- u16 pcmciawrframebc; /* rev8 */
- u16 PAD[1];
- u16 pcmciaunderflowtimer; /* rev8 */
- u16 PAD[1];
-
- /* interrupt */
- u32 intstatus; /* 0x020, rev8 */
- u32 hostintmask; /* rev8 */
- u32 intmask; /* rev8 */
- u32 sbintstatus; /* rev8 */
- u32 sbintmask; /* rev8 */
- u32 funcintmask; /* rev4 */
- u32 PAD[2];
- u32 tosbmailbox; /* 0x040, rev8 */
- u32 tohostmailbox; /* rev8 */
- u32 tosbmailboxdata; /* rev8 */
- u32 tohostmailboxdata; /* rev8 */
-
- /* synchronized access to registers in SDIO clock domain */
- u32 sdioaccess; /* 0x050, rev8 */
- u32 PAD[3];
-
- /* PCMCIA frame control */
- u8 pcmciaframectrl; /* 0x060, rev8 */
- u8 PAD[3];
- u8 pcmciawatermark; /* rev8 */
- u8 PAD[155];
-
- /* interrupt batching control */
- u32 intrcvlazy; /* 0x100, rev8 */
- u32 PAD[3];
-
- /* counters */
- u32 cmd52rd; /* 0x110, rev8 */
- u32 cmd52wr; /* rev8 */
- u32 cmd53rd; /* rev8 */
- u32 cmd53wr; /* rev8 */
- u32 abort; /* rev8 */
- u32 datacrcerror; /* rev8 */
- u32 rdoutofsync; /* rev8 */
- u32 wroutofsync; /* rev8 */
- u32 writebusy; /* rev8 */
- u32 readwait; /* rev8 */
- u32 readterm; /* rev8 */
- u32 writeterm; /* rev8 */
- u32 PAD[40];
- u32 clockctlstatus; /* rev8 */
- u32 PAD[7];
-
- u32 PAD[128]; /* DMA engines */
-
- /* SDIO/PCMCIA CIS region */
- char cis[512]; /* 0x400-0x5ff, rev6 */
-
- /* PCMCIA function control registers */
- char pcmciafcr[256]; /* 0x600-6ff, rev6 */
- u16 PAD[55];
-
- /* PCMCIA backplane access */
- u16 backplanecsr; /* 0x76E, rev6 */
- u16 backplaneaddr0; /* rev6 */
- u16 backplaneaddr1; /* rev6 */
- u16 backplaneaddr2; /* rev6 */
- u16 backplaneaddr3; /* rev6 */
- u16 backplanedata0; /* rev6 */
- u16 backplanedata1; /* rev6 */
- u16 backplanedata2; /* rev6 */
- u16 backplanedata3; /* rev6 */
- u16 PAD[31];
-
- /* sprom "size" & "blank" info */
- u16 spromstatus; /* 0x7BE, rev2 */
- u32 PAD[464];
-
- u16 PAD[0x80];
-};
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci,
- u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci);
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec);
-
-#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 092e9c824992..5e53eb1b2ffa 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -182,6 +182,95 @@ struct brcmf_sdio_dev {
uint max_segment_size;
};
+/* sdio core registers */
+struct sdpcmd_regs {
+ u32 corecontrol; /* 0x00, rev8 */
+ u32 corestatus; /* rev8 */
+ u32 PAD[1];
+ u32 biststatus; /* rev8 */
+
+ /* PCMCIA access */
+ u16 pcmciamesportaladdr; /* 0x010, rev8 */
+ u16 PAD[1];
+ u16 pcmciamesportalmask; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciawrframebc; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciaunderflowtimer; /* rev8 */
+ u16 PAD[1];
+
+ /* interrupt */
+ u32 intstatus; /* 0x020, rev8 */
+ u32 hostintmask; /* rev8 */
+ u32 intmask; /* rev8 */
+ u32 sbintstatus; /* rev8 */
+ u32 sbintmask; /* rev8 */
+ u32 funcintmask; /* rev4 */
+ u32 PAD[2];
+ u32 tosbmailbox; /* 0x040, rev8 */
+ u32 tohostmailbox; /* rev8 */
+ u32 tosbmailboxdata; /* rev8 */
+ u32 tohostmailboxdata; /* rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ u32 sdioaccess; /* 0x050, rev8 */
+ u32 PAD[3];
+
+ /* PCMCIA frame control */
+ u8 pcmciaframectrl; /* 0x060, rev8 */
+ u8 PAD[3];
+ u8 pcmciawatermark; /* rev8 */
+ u8 PAD[155];
+
+ /* interrupt batching control */
+ u32 intrcvlazy; /* 0x100, rev8 */
+ u32 PAD[3];
+
+ /* counters */
+ u32 cmd52rd; /* 0x110, rev8 */
+ u32 cmd52wr; /* rev8 */
+ u32 cmd53rd; /* rev8 */
+ u32 cmd53wr; /* rev8 */
+ u32 abort; /* rev8 */
+ u32 datacrcerror; /* rev8 */
+ u32 rdoutofsync; /* rev8 */
+ u32 wroutofsync; /* rev8 */
+ u32 writebusy; /* rev8 */
+ u32 readwait; /* rev8 */
+ u32 readterm; /* rev8 */
+ u32 writeterm; /* rev8 */
+ u32 PAD[40];
+ u32 clockctlstatus; /* rev8 */
+ u32 PAD[7];
+
+ u32 PAD[128]; /* DMA engines */
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+ u16 PAD[55];
+
+ /* PCMCIA backplane access */
+ u16 backplanecsr; /* 0x76E, rev6 */
+ u16 backplaneaddr0; /* rev6 */
+ u16 backplaneaddr1; /* rev6 */
+ u16 backplaneaddr2; /* rev6 */
+ u16 backplaneaddr3; /* rev6 */
+ u16 backplanedata0; /* rev6 */
+ u16 backplanedata1; /* rev6 */
+ u16 backplanedata2; /* rev6 */
+ u16 backplanedata3; /* rev6 */
+ u16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ u16 spromstatus; /* 0x7BE, rev2 */
+ u32 PAD[464];
+
+ u16 PAD[0x80];
+};
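
Backplane register addresses are formed as the owning core's base address plus the field offset into this layout, which is what the offsetof()-based macro at the top of the patch and the CORE_CC_REG() users compute. A tiny sketch of the idea using a trimmed mock of the first fields (the 0x18002000 base is just an example value, not a statement about any particular chip):

#include <stddef.h>
#include <stdio.h>

/* trimmed mock of the start of sdpcmd_regs, just enough to reach intstatus */
struct sdpcmd_regs_head {
	unsigned int corecontrol;		/* 0x00 */
	unsigned int corestatus;		/* 0x04 */
	unsigned int pad0;			/* 0x08 */
	unsigned int biststatus;		/* 0x0c */
	unsigned short pcmcia_access[8];	/* 0x10..0x1f */
	unsigned int intstatus;			/* 0x20 */
};

int main(void)
{
	unsigned long core_base = 0x18002000UL;	/* example SDIO core base */

	/* register address = core base + offset of the field in the layout */
	printf("intstatus lives at 0x%lx (offset 0x%zx)\n",
	       core_base + offsetof(struct sdpcmd_regs_head, intstatus),
	       offsetof(struct sdpcmd_regs_head, intstatus));
	return 0;
}
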
+
/* Register/deregister interrupt handler. */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index d7718a5fa2f0..a54db9185747 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -351,13 +351,11 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
* triples, returning a pointer to the substring whose first element
* matches tag
*/
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key)
{
- struct brcmf_tlv *elt;
- int totlen;
-
- elt = (struct brcmf_tlv *)buf;
- totlen = buflen;
+ const struct brcmf_tlv *elt = buf;
+ int totlen = buflen;
/* find tagged parameter */
while (totlen >= TLV_HDR_LEN) {
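For reference, the now const-qualified brcmf_parse_tlvs() walks a buffer of id/len/value triples until the requested id is found. Below is a minimal standalone sketch of that walk; the struct layout and helper name are illustrative assumptions, not the driver's own definitions.

#include <stdint.h>
#include <stddef.h>

struct tlv {                           /* illustrative, not struct brcmf_tlv */
	uint8_t id;
	uint8_t len;
	uint8_t data[];
};

#define TLV_HDR 2                      /* id + len bytes */

static const struct tlv *find_tlv(const void *buf, int buflen, uint8_t key)
{
	const struct tlv *elt = buf;
	int totlen = buflen;

	while (totlen >= TLV_HDR) {
		int len = elt->len;

		/* match only if the whole element fits in the buffer */
		if (elt->id == key && totlen >= len + TLV_HDR)
			return elt;

		elt = (const struct tlv *)((const uint8_t *)elt + len + TLV_HDR);
		totlen -= len + TLV_HDR;
	}
	return NULL;
}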
@@ -378,8 +376,8 @@ struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
* not update the tlvs buffer pointer/length.
*/
static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
- u8 *oui, u32 oui_len, u8 type)
+brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
@@ -401,12 +399,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
}
static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
+brcmf_find_wpaie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
return (struct brcmf_vs_tlv *)ie;
}
@@ -414,9 +412,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
}
static struct brcmf_vs_tlv *
-brcmf_find_wpsie(u8 *parse, u32 len)
+brcmf_find_wpsie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
@@ -1562,9 +1560,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
- void *ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
+ const void *ie;
u32 ie_len;
struct brcmf_ext_join_params_le *ext_join_params;
u16 chanspec;
@@ -1591,7 +1589,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ie_len = wpa_ie->len + TLV_HDR_LEN;
} else {
/* find the RSN_IE */
- rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
+ sme->ie_len,
WLAN_EID_RSN);
if (rsn_ie) {
ie = rsn_ie;
@@ -1981,7 +1980,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
- if (mac_addr) {
+ if (mac_addr &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
brcmf_dbg(TRACE, "Exit");
return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
}
@@ -2164,6 +2165,8 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
u8 *bssid = profile->bssid;
struct brcmf_sta_info_le sta_info_le;
+ u32 beacon_period;
+ u32 dtim_period;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
if (!check_vif_up(ifp->vif))
@@ -2218,6 +2221,30 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
+ &beacon_period);
+ if (err) {
+ brcmf_err("Could not get beacon period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.beacon_interval =
+ beacon_period;
+ brcmf_dbg(CONN, "Beacon peroid %d\n",
+ beacon_period);
+ }
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
+ &dtim_period);
+ if (err) {
+ brcmf_err("Could not get DTIM period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.dtim_period = dtim_period;
+ brcmf_dbg(CONN, "DTIM peroid %d\n",
+ dtim_period);
+ }
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
}
} else
err = -EPERM;
@@ -2455,7 +2482,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
- struct brcmf_tlv *tim;
+ const struct brcmf_tlv *tim;
u16 beacon_interval;
u8 dtim_period;
size_t ie_len;
@@ -3220,8 +3247,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
}
static s32
-brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
- bool is_rsn_ie)
+brcmf_configure_wpaie(struct net_device *ndev,
+ const struct brcmf_vs_tlv *wpa_ie,
+ bool is_rsn_ie)
{
struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
@@ -3707,11 +3735,11 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
s32 ie_offset;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_tlv *ssid_ie;
+ const struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
@@ -4658,6 +4686,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct net_device *ndev = ifp->ndev;
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct ieee80211_channel *chan;
s32 err = 0;
if (ifp->vif->mode == WL_MODE_AP) {
@@ -4665,9 +4694,10 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
} else if (brcmf_is_linkup(e)) {
brcmf_dbg(CONN, "Linkup\n");
if (brcmf_is_ibssmode(ifp->vif)) {
+ chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
memcpy(profile->bssid, e->addr, ETH_ALEN);
wl_inform_ibss(cfg, ndev, e->addr);
- cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -5164,9 +5194,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
- ch.chnum, band_chan_arr[index].center_freq,
- ch.bw, ch.sb);
if (ch.bw == BRCMU_CHAN_BW_40) {
/* assuming the order is HT20, HT40 Upper,
* HT40 lower from chanspecs
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 2dc6a074e8ed..254feed2860e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -491,7 +491,8 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index aa7ad3a7a69b..4e5c0f8c9496 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local)
void hostap_remove_proc(local_info_t *local)
{
- remove_proc_subtree(local->ddev->name, hostap_proc);
+ proc_remove(local->proc);
}
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 9a45f6f626f6..76b0729ade17 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -891,8 +891,7 @@ il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = il3945_rs_tx_status,
.get_rate = il3945_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 4d5e33259ca8..eaaeea19d8c5 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2807,8 +2807,7 @@ il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_4965_ops = {
.name = IL4965_RS_NAME,
.tx_status = il4965_rs_tx_status,
.get_rate = il4965_rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 3eb2102ce236..74b3b4de7bb7 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -68,6 +68,19 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLWIFI && IWLDVM=n && IWLMVM=n
+config IWLWIFI_BCAST_FILTERING
+ bool "Enable broadcast filtering"
+ depends on IWLMVM
+ help
+ Say Y here to enable the default broadcast filtering configuration.
+
+ Enabling broadcast filtering will drop any incoming wireless
+ broadcast frames, except for a few specific predefined
+ patterns (e.g. incoming ARP requests).
+
+ If unsure, don't enable this option, as some programs might
+ expect incoming broadcasts for their normal operation.
+
menu "Debugging Options"
depends on IWLWIFI
@@ -111,6 +124,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
+
bool "iwlwifi device access tracing"
depends on IWLWIFI
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1fa64429bcc2..3d32f4120174 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
-iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index c24d1d3d55f6..73086c1629ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* disabled by default */
+ return false;
+}
+
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ if (!iwl_enable_rx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Rx\n");
ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ if (!iwl_enable_tx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 0977d93b529d..aa773a2da4ab 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -176,46 +176,46 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
{0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
{0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
{0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
{0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
{0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
@@ -1111,7 +1111,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
@@ -1173,9 +1173,8 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -3319,8 +3318,8 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
struct ieee80211_sta *sta, void *priv_sta)
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index bdd5644a400b..f6bd25cad203 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -315,7 +315,7 @@ struct iwl_scale_tbl_info {
u8 is_dup; /* 1 = duplicated data streams */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximum number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 2a59da2ff87a..fbd262ffa497 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,8 +71,8 @@
#define IWL3160_UCODE_API_MAX 8
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 7
-#define IWL3160_UCODE_API_OK 7
+#define IWL7260_UCODE_API_OK 8
+#define IWL3160_UCODE_API_OK 8
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 7
@@ -95,6 +95,8 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define NVM_HW_SECTION_NUM_FAMILY_7000 0
+
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -120,7 +122,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl7000_base_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000
const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -194,6 +197,17 @@ const struct iwl_cfg iwl3160_n_cfg = {
.host_interrupt_operation_mode = true,
};
+static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
+ {.pwr = 1600, .backoff = 0},
+ {.pwr = 1300, .backoff = 467},
+ {.pwr = 900, .backoff = 1900},
+ {.pwr = 800, .backoff = 2630},
+ {.pwr = 700, .backoff = 3720},
+ {.pwr = 600, .backoff = 5550},
+ {.pwr = 500, .backoff = 9350},
+ {0},
+};
+
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
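The iwl7265_pwr_tx_backoffs table above pairs platform power limits (mW) with Tx backoff values (usec), ordered by decreasing power and terminated by a zero entry (the struct is added to iwl-config.h later in this series). A hedged, userspace-style sketch of one plausible lookup over such a table; the helper name and exact consumer are assumptions, not the driver's code.

#include <stdint.h>

struct pwr_tx_backoff {                /* mirrors struct iwl_pwr_tx_backoff */
	uint32_t pwr;                  /* power limit in mW */
	uint32_t backoff;              /* tx backoff in usec */
};

/* return the backoff of the first entry whose limit the platform meets */
static uint32_t pick_backoff(const struct pwr_tx_backoff *tbl, uint32_t limit_mw)
{
	if (!tbl)
		return 0;
	for (; tbl->pwr; tbl++)
		if (limit_mw >= tbl->pwr)
			return tbl->backoff;
	return 0;                      /* below the smallest listed limit */
}

For example, a 1000 mW platform limit falls through the 1600 and 1300 entries and selects the {900, 1900} pair, i.e. a 1900 usec backoff.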
@@ -201,6 +215,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_2n_cfg = {
@@ -210,6 +225,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_n_cfg = {
@@ -219,6 +235,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
new file mode 100644
index 000000000000..f5bd82b88592
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL8000_UCODE_API_MAX 8
+
+/* Oldest version we won't warn about */
+#define IWL8000_UCODE_API_OK 8
+
+/* Lowest firmware API version supported */
+#define IWL8000_UCODE_API_MIN 8
+
+/* NVM versions */
+#define IWL8000_NVM_VERSION 0x0a1d
+#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
+
+#define IWL8000_FW_PRE "iwlwifi-8000-"
+#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_8000 10
+
+static const struct iwl_base_params iwl8000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl8000_ht_params = {
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_8000 \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_8000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl8000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000
+
+const struct iwl_cfg iwl8260_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl8260_n_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 1ced525157dc..13ec56607d10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -84,6 +84,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
IWL_DEVICE_FAMILY_7000,
+ IWL_DEVICE_FAMILY_8000,
};
/*
@@ -192,6 +193,15 @@ struct iwl_eeprom_params {
bool enhanced_txpower;
};
+/* Tx-backoff power threshold
+ * @pwr: The power limit in mW
+ * @backoff: The tx-backoff in usec
+ */
+struct iwl_pwr_tx_backoff {
+ u32 pwr;
+ u32 backoff;
+};
+
/**
* struct iwl_cfg
 * @name: Official name of the device
@@ -217,6 +227,9 @@ struct iwl_eeprom_params {
* @high_temp: Is this NIC is designated to be in high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
+ * @d0i3: device uses d0i3 instead of d3
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @pwr_tx_backoffs: translation table between power limits and backoffs
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -247,6 +260,9 @@ struct iwl_cfg {
const bool internal_wimax_coex;
const bool host_interrupt_operation_mode;
bool high_temp;
+ bool d0i3;
+ u8 nvm_hw_section_num;
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
};
/*
@@ -307,6 +323,8 @@ extern const struct iwl_cfg iwl3160_n_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl8260_n_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 9d325516c42d..f13dec9ad9c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -395,38 +395,6 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
-/* SECURE boot registers */
-#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
-enum secure_boot_config_reg {
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
-};
-
-#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
-#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
-enum secure_boot_status_reg {
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
- CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
-};
-
-#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
-enum secure_load_status_reg {
- CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
- CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
-};
-
-#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
-#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
-
-#define CSR_SECURE_TIME_OUT (100)
-
-#define FH_TCSR_0_REG0 (0x1D00)
-
/*
* HBUS (Host-side Bus)
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a75aac986a23..c8cbdbe15924 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -126,6 +126,7 @@ do { \
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_RPM 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
@@ -189,5 +190,6 @@ do { \
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index c3728163be46..bcfdcfb4485e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -128,7 +128,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
- char firmware_name[25]; /* name of firmware file to load */
+ char firmware_name[32]; /* name of firmware file to load */
struct completion request_firmware_complete;
@@ -237,7 +237,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
+ name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
@@ -1286,7 +1287,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
MODULE_PARM_DESC(11n_disable,
- "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
+ "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
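With this change TX aggregation becomes opt-in: loading the module with 11n_disable=8 turns it back on, while bit 1 still forces it off. A standalone sketch of how the bitmap is decoded; the macro and function names are illustrative, the in-tree logic being iwl_enable_tx_ampdu()/iwl_enable_rx_ampdu() shown earlier in dvm/mac80211.c.

#include <stdbool.h>
#include <stdint.h>

#define HT_DISABLE_ALL    (1u << 0)    /* 11n_disable=1 */
#define HT_DISABLE_TXAGG  (1u << 1)    /* 11n_disable=2 */
#define HT_DISABLE_RXAGG  (1u << 2)    /* 11n_disable=4 */
#define HT_ENABLE_TXAGG   (1u << 3)    /* 11n_disable=8 */

/* TX aggregation: off by default, an explicit disable wins over enable */
static bool tx_agg_enabled(uint32_t disable_11n)
{
	if (disable_11n & HT_DISABLE_TXAGG)
		return false;
	return (disable_11n & HT_ENABLE_TXAGG) != 0;
}

/* RX aggregation stays on unless bit 2 is set */
static bool rx_agg_enabled(uint32_t disable_11n)
{
	return !(disable_11n & HT_DISABLE_RXAGG);
}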
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 5f1493c44097..f80ba586c253 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -95,6 +95,8 @@
* @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
* single bound interface).
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -119,6 +121,8 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+ IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
+ IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
/* The default calibrate table size if not specified by firmware file */
@@ -160,8 +164,7 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 6
-#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
+#define IWL_UCODE_SECTION_MAX 12
struct iwl_ucode_capabilities {
u32 max_probe_length;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index f98175a0d35b..07372f2b0250 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -130,6 +130,21 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
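A hedged usage sketch for the new iwl_poll_prph_bit() helper, waiting on one of the secure-boot status registers introduced in iwl-prph.h further down in this series. The wrapping function is hypothetical and assumes iwl-io.h and iwl-prph.h are included.

/* hypothetical caller: wait for CPU1 firmware verification to succeed */
static int wait_secure_boot_cpu1(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
				LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS,
				LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS,
				LMPM_SECURE_TIME_OUT);
	if (ret < 0)
		return ret;	/* -ETIMEDOUT */

	return 0;		/* on success, ret is the elapsed poll time */
}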
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index c339c1bed080..9e81b23d738b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -72,6 +72,8 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 0a84ade7edac..b29075c3da8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -79,9 +79,12 @@ enum iwl_power_level {
IWL_POWER_NUM
};
-#define IWL_DISABLE_HT_ALL BIT(0)
-#define IWL_DISABLE_HT_TXAGG BIT(1)
-#define IWL_DISABLE_HT_RXAGG BIT(2)
+enum iwl_disable_11n {
+ IWL_DISABLE_HT_ALL = BIT(0),
+ IWL_DISABLE_HT_TXAGG = BIT(1),
+ IWL_DISABLE_HT_RXAGG = BIT(2),
+ IWL_ENABLE_HT_TXAGG = BIT(3),
+};
/**
* struct iwl_mod_params
@@ -90,7 +93,7 @@ enum iwl_power_level {
*
* @sw_crypto: using hardware encryption, default = 0
* @disable_11n: disable 11n capabilities, default = 0,
- * use IWL_DISABLE_HT_* constants
+ * use IWL_[DIS,EN]ABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
* @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 725e954d8475..53b9cad50477 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -402,11 +402,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
tx_chains, rx_chains);
- data->calib_version = 255; /* TODO:
- this value will prevent some checks from
- failing, we need to check if this
- field is still needed, and if it does,
- where is it in the NVM*/
+ data->calib_version = 255;
return data;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b5be51f3cd3d..5d78207040b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -131,6 +131,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
+ * @enter_d0i3: configure the fw to enter d0i3. May sleep.
+ * @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -148,6 +150,8 @@ struct iwl_op_mode_ops {
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
+ int (*enter_d0i3)(struct iwl_op_mode *op_mode);
+ int (*exit_d0i3)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -155,7 +159,7 @@ void iwl_opmode_deregister(const char *name);
/**
* struct iwl_op_mode - operational mode
- * @ops - pointer to its own ops
+ * @ops: pointer to its own ops
*
* This holds an implementation of the mac80211 / fw API.
*/
@@ -226,4 +230,22 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
+static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->enter_d0i3)
+ return 0;
+ return op_mode->ops->enter_d0i3(op_mode);
+}
+
+static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->exit_d0i3)
+ return 0;
+ return op_mode->ops->exit_d0i3(op_mode);
+}
+
#endif /* __iwl_op_mode_h__ */
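The two wrappers above make the d0i3 hooks optional: op modes that leave them NULL keep the old behaviour, since the inline helpers fall back to returning 0. An op mode that wants d0i3 support would wire them into its ops table roughly as sketched below; the handler names are placeholders and the mandatory callbacks are elided.

static int example_enter_d0i3(struct iwl_op_mode *op_mode)
{
	/* flush pending work and configure the firmware to enter d0i3 */
	return 0;
}

static int example_exit_d0i3(struct iwl_op_mode *op_mode)
{
	/* configure the firmware to exit d0i3 */
	return 0;
}

static const struct iwl_op_mode_ops example_ops = {
	/* .start, .stop, .rx, ... (mandatory callbacks elided) */
	.enter_d0i3 = example_enter_d0i3,
	.exit_d0i3 = example_exit_d0i3,
};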
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index fa77d63a277a..b761ac4822a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -72,7 +72,7 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_PAPD_CH_GROUPS 7
#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
@@ -383,7 +383,7 @@ static int iwl_phy_db_send_all_channel_groups(
if (!entry)
return -EINVAL;
- if (WARN_ON_ONCE(!entry->size))
+ if (!entry->size)
continue;
/* Send the requested PHY DB section */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 100bd0d79681..9c90186d1744 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -105,6 +105,13 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
+/*
+ * Device reset for family 8000
+ * write to bit 24 in order to reset the CPU
+ */
+#define RELEASE_CPU_RESET (0x300C)
+#define RELEASE_CPU_RESET_BIT BIT(24)
+
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
@@ -281,4 +288,43 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
+/* SECURE boot registers */
+#define LMPM_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum secure_boot_config_reg {
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
+#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
+enum secure_boot_status_reg {
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000001,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
+enum secure_load_status_reg {
+ LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
+ LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
+ LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
+ LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
+#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
+#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
+#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
+
+#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
+#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
+#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
+#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
+
+#define LMPM_SECURE_TIME_OUT (100)
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 1f065cf4a4ba..7b19274b550f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -193,12 +193,23 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
+ * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 * command queue, but after other high priority commands. Valid only
+ * with CMD_ASYNC.
+ * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
+ * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
+ * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
+ * (i.e. mark it as non-idle).
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
+ CMD_HIGH_PRIO = BIT(3),
+ CMD_SEND_IN_IDLE = BIT(4),
+ CMD_MAKE_TRANS_IDLE = BIT(5),
+ CMD_WAKE_UP_TRANS = BIT(6),
};
#define DEF_CMD_PAYLOAD_SIZE 320
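A hedged sketch of how the new flags combine when building a host command. The command id and payload are placeholders, and the struct iwl_host_cmd field names (.id, .flags, .data, .len) are assumed from their use elsewhere in this series (e.g. in mvm/bt-coex.c below).

static int send_async_high_prio(struct iwl_trans *trans, u8 cmd_id,
				const void *payload, u16 payload_len)
{
	struct iwl_host_cmd cmd = {
		.id = cmd_id,				/* placeholder command */
		.flags = CMD_ASYNC | CMD_HIGH_PRIO,	/* HIGH_PRIO needs ASYNC */
		.data = { payload, },
		.len = { payload_len, },
	};

	return iwl_trans_send_cmd(trans, &cmd);
}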
@@ -335,6 +346,9 @@ enum iwl_d3_status {
* @STATUS_INT_ENABLED: interrupts are enabled
* @STATUS_RFKILL: the HW RFkill switch is in KILL position
* @STATUS_FW_ERROR: the fw is in error state
+ * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
+ * are sent
+ * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -343,6 +357,8 @@ enum iwl_trans_status {
STATUS_INT_ENABLED,
STATUS_RFKILL,
STATUS_FW_ERROR,
+ STATUS_TRANS_GOING_IDLE,
+ STATUS_TRANS_IDLE,
};
/**
@@ -443,6 +459,11 @@ struct iwl_trans;
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
* @set_bits_mask - set SRAM register according to value and mask.
+ * @ref: grab a reference to the transport/FW layers, disallowing
+ * certain low power states
+ * @unref: release a reference previously taken with @ref. Note that
+ * initially the reference count is 1, making an initial @unref
+ * necessary to allow low power states.
*/
struct iwl_trans_ops {
@@ -489,6 +510,8 @@ struct iwl_trans_ops {
unsigned long *flags);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
+ void (*ref)(struct iwl_trans *trans);
+ void (*unref)(struct iwl_trans *trans);
};
/**
@@ -523,6 +546,7 @@ enum iwl_trans_state {
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -551,6 +575,8 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
+ u64 dflt_pwr_limit;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -627,6 +653,18 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test);
}
+static inline void iwl_trans_ref(struct iwl_trans *trans)
+{
+ if (trans->ops->ref)
+ trans->ops->ref(trans);
+}
+
+static inline void iwl_trans_unref(struct iwl_trans *trans)
+{
+ if (trans->ops->unref)
+ trans->ops->unref(trans);
+}
+
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index f98ec2b23898..41d390fd2ac8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o power_legacy.o bt-coex.o
+iwlmvm-y += power.o bt-coex.o
iwlmvm-y += led.o tt.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 76cde6ce6551..38a54a3fde34 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -378,7 +378,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
bt_cmd->flags = cpu_to_le32(flags);
bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
@@ -399,6 +398,9 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_TXRX_MAX_FREQ_0 |
BT_VALID_SYNC_TO_SCO);
+ if (IWL_MVM_BT_COEX_SYNC2SCO)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
+
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@@ -489,8 +491,7 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return ret;
}
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
- bool enable)
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
{
struct iwl_bt_coex_cmd *bt_cmd;
/* Send ASYNC since this can be sent from an atomic context */
@@ -500,25 +501,16 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
.dataflags = { IWL_HCMD_DFL_DUP, },
.flags = CMD_ASYNC,
};
-
- struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int ret;
- if (sta_id == IWL_MVM_STATION_COUNT)
- return 0;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (!mvmsta)
return 0;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
/* nothing to do */
- if (mvmsta->bt_reduced_txpower == enable)
+ if (mvmsta->bt_reduced_txpower_dbg ||
+ mvmsta->bt_reduced_txpower == enable)
return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -552,6 +544,7 @@ struct iwl_bt_iterator_data {
bool reduced_tx_power;
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
+ bool primary_ll;
};
static inline
@@ -577,72 +570,113 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
+ u32 bt_activity_grading;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
- return;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /* default smps_mode for BSS / P2P client is AUTOMATIC */
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ data->num_bss_ifaces++;
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ /*
+ * Count unassoc BSSes, relax SMPS constraints
+ * and disable reduced Tx Power
+ */
+ if (!vif->bss_conf.assoc) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (iwl_mvm_bt_coex_reduced_txp(mvm,
+ mvmvif->ap_sta_id,
+ false))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ /* default smps_mode for AP / GO is OFF */
+ smps_mode = IEEE80211_SMPS_OFF;
+ if (!mvmvif->ap_ibss_active) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ return;
+ }
+
+ /* the Ack / Cts kill mask must be default if AP / GO */
+ data->reduced_tx_power = false;
+ break;
+ default:
+ return;
+ }
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
- /* ... and it is an associated STATION, relax constraints */
- if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ /* ... relax constraints and disable rssi events */
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
- /* SoftAP / GO will always be primary */
- if (vif->type == NL80211_IFTYPE_AP) {
- if (!mvmvif->ap_ibss_active)
- return;
+ bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+ if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_STATIC;
+ else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+ smps_mode = vif->type == NL80211_IFTYPE_AP ?
+ IEEE80211_SMPS_OFF :
+ IEEE80211_SMPS_DYNAMIC;
+ IWL_DEBUG_COEX(data->mvm,
+ "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
+ mvmvif->id, data->notif->bt_status, bt_activity_grading,
+ smps_mode);
- /* the Ack / Cts kill mask must be default if AP / GO */
- data->reduced_tx_power = false;
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
- if (chanctx_conf == data->primary)
- return;
+ /* low latency is always primary */
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ data->primary_ll = true;
- /* downgrade the current primary no matter what its type is */
data->secondary = data->primary;
data->primary = chanctx_conf;
- return;
}
- data->num_bss_ifaces++;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
- /* we are now a STA / P2P Client, and take associated ones only */
- if (!vif->bss_conf.assoc)
+ if (chanctx_conf == data->primary)
+ return;
+
+ if (!data->primary_ll) {
+ /*
+ * downgrade the current primary no matter what its
+ * type is.
+ */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ } else {
+ /* there is low latency vif - we will be secondary */
+ data->secondary = chanctx_conf;
+ }
return;
+ }
- /* STA / P2P Client, try to be primary if first vif */
+ /*
+ * STA / P2P Client, try to be primary if first vif. If we are in low
+ * latency mode, we are already in primary and just don't do much
+ */
if (!data->primary || data->primary == chanctx_conf)
data->primary = chanctx_conf;
else if (!data->secondary)
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
- if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
- smps_mode = IEEE80211_SMPS_STATIC;
- else if (le32_to_cpu(data->notif->bt_activity_grading) >=
- BT_LOW_TRAFFIC)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
-
- IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
- mvmvif->id, data->notif->bt_status,
- data->notif->bt_activity_grading, smps_mode);
-
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
-
/* don't reduce the Tx power if in loose scheme */
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
mvm->cfg->bt_shared_single_ant) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 036857698565..2d133b1b2dde 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -78,5 +78,9 @@
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
+#define IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR 24 /* TU */
+#define IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR 24 /* TU */
+#define IWL_MVM_BT_COEX_SYNC2SCO 1
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index f36a7ee0267f..b956e2f0b631 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -963,7 +963,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
int ret, i;
int len __maybe_unused;
- u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
/*
@@ -980,8 +979,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- old_aux_sta_id = mvm->aux_sta.sta_id;
-
/* see if there's only a single BSS vif and it's associated */
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1067,16 +1064,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. Store the real STA ID here
- * and assign 0. When we leave this function, we'll restore
- * the original value for the resume code.
- */
- old_ap_sta_id = mvm_ap_sta->sta_id;
- mvm_ap_sta->sta_id = 0;
- mvmvif->ap_sta_id = 0;
-
- /*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
* the reprogramming is going to be manual to avoid adding
@@ -1096,16 +1083,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->ptk_ivlen = 0;
mvm->ptk_icvlen = 0;
- /*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. As a result, we have to move
- * the auxiliary station to ID 1 so the ID 0 remains free for
- * the AP station for later.
- * We set the sta_id to 1 here, and reset it to its previous
- * value (that we stored above) later.
- */
- mvm->aux_sta.sta_id = 1;
-
ret = iwl_mvm_load_d3_fw(mvm);
if (ret)
goto out;
@@ -1191,11 +1168,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto out;
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
goto out;
@@ -1222,10 +1199,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
- mvm->aux_sta.sta_id = old_aux_sta_id;
- mvm_ap_sta->sta_id = old_ap_sta_id;
- mvmvif->ap_sta_id = old_ap_sta_id;
-
if (ret < 0)
ieee80211_restart_hw(mvm->hw);
out_noreset:
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 0e29cd83a06a..29b4396018b1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -185,7 +185,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -202,7 +202,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
int bufsz = sizeof(buf);
int pos;
- pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+ pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -225,6 +225,29 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
ap_sta_id = mvmvif->ap_sta_id;
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+ break;
+ default:
+ break;
+ }
+
pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
@@ -249,9 +272,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d\n",
+ "ap_sta_id %d - reduced Tx power %d force %d\n",
ap_sta_id,
- mvm_sta->bt_reduced_txpower);
+ mvm_sta->bt_reduced_txpower,
+ mvm_sta->bt_reduced_txpower_dbg);
}
}
@@ -269,6 +293,36 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_mvm_sta *mvmsta;
+ bool reduced_tx_power;
+ int ret;
+
+ if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return -ENOTCONN;
+
+ if (strtobool(buf, &reduced_tx_power) != 0)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+ mvmsta->bt_reduced_txpower_dbg = false;
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ reduced_tx_power);
+ if (!ret)
+ mvmsta->bt_reduced_txpower_dbg = true;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ? : count;
+}
+
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -403,9 +457,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -460,6 +514,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_update_low_latency(mvm, vif, value);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[3];
+
+ buf[0] = mvmvif->low_latency ? '1' : '0';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -473,6 +562,8 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -496,15 +587,18 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
+ iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
- S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 369d4c90e669..6853e5efe522 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -90,7 +90,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
int sta_id, drain, ret;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
@@ -105,13 +105,12 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+ if (!mvmsta)
ret = -ENOENT;
else
- ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
- count;
+ ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
mutex_unlock(&mvm->mutex);
@@ -251,7 +250,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
}
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -600,6 +599,187 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return count;
}
+#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ const struct iwl_fw_bcast_filter *filter;
+ char *buf;
+ int bufsz = 1024;
+ int i, j, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+ filter = &cmd.filters[i];
+
+ ADD_TEXT("Filter [%d]:\n", i);
+ ADD_TEXT("\tDiscard=%d\n", filter->discard);
+ ADD_TEXT("\tFrame Type: %s\n",
+ filter->frame_type ? "IPv4" : "Generic");
+
+ for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+ const struct iwl_fw_bcast_filter_attr *attr;
+
+ attr = &filter->attrs[j];
+ if (!attr->mask)
+ break;
+
+ ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
+ j, attr->offset,
+ attr->offset_type ? "IP End" :
+ "Payload Start",
+ be32_to_cpu(attr->mask),
+ be32_to_cpu(attr->val),
+ le16_to_cpu(attr->reserved1));
+ }
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int pos, next_pos;
+ struct iwl_fw_bcast_filter filter = {};
+ struct iwl_bcast_filter_cmd cmd;
+ u32 filter_id, attr_id, mask, value;
+ int err = 0;
+
+ if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
+ &filter.frame_type, &pos) != 3)
+ return -EINVAL;
+
+ if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+ filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
+ return -EINVAL;
+
+ for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
+ attr_id++) {
+ struct iwl_fw_bcast_filter_attr *attr =
+ &filter.attrs[attr_id];
+
+ if (pos >= count)
+ break;
+
+ if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
+ &attr->offset, &attr->offset_type,
+ &mask, &value, &next_pos) != 4)
+ return -EINVAL;
+
+ attr->mask = cpu_to_be32(mask);
+ attr->val = cpu_to_be32(value);
+ if (mask)
+ filter.num_attrs++;
+
+ pos += next_pos;
+ }
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
+ &filter, sizeof(filter));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ char *buf;
+ int bufsz = 1024;
+ int i, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+ const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
+
+ ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
+ i, mac->default_discard, mac->attached_filters);
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_bcast_filter_cmd cmd;
+ struct iwl_fw_bcast_mac mac = {};
+ u32 mac_id, attached_filters;
+ int err = 0;
+
+ if (!mvm->bcast_filters)
+ return -ENOENT;
+
+ if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
+ &attached_filters) != 3)
+ return -EINVAL;
+
+ if (mac_id >= ARRAY_SIZE(cmd.macs) ||
+ mac.default_discard > 1 ||
+ attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
+ return -EINVAL;
+
+ mac.attached_filters = cpu_to_le16(attached_filters);
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
+ &mac, sizeof(mac));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+#endif
+
#ifdef CONFIG_PM_SLEEP
static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -658,15 +838,74 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
}
#endif
+#define PRINT_MVM_REF(ref) do { \
+ if (test_bit(ref, mvm->ref_bitmap)) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ "\t(0x%lx) %s\n", \
+ BIT(ref), #ref); \
+} while (0)
+
+static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
+ mvm->ref_bitmap[0]);
+
+ PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+ PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC);
+ PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+ PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+ PRINT_MVM_REF(IWL_MVM_REF_USER);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long value;
+ int ret;
+ bool taken;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
+ if (value == 1 && !taken)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+ else if (value == 0 && taken)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
-#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, mvm, \
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
/* Device wide debugfs entries */
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
@@ -680,6 +919,12 @@ MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
+#endif
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
@@ -687,6 +932,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
+ struct dentry *bcast_dir __maybe_unused;
char buf[100];
mvm->debugfs_dir = dbgfs_dir;
@@ -705,6 +951,27 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+ bcast_dir = debugfs_create_dir("bcast_filtering",
+ mvm->debugfs_dir);
+ if (!bcast_dir)
+ goto err;
+
+ if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
+ bcast_dir,
+ &mvm->dbgfs_bcast_filtering.override))
+ goto err;
+
+ MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ }
+#endif
+
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
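
The bcast_filters write handler above parses a fixed header followed by a variable number of attribute tuples from a single line of text, using sscanf()'s "%n" conversion to record how far each pass consumed so the next pass can resume from there. A minimal standalone sketch of that parsing pattern, with invented input and field names (not the driver's code):

#include <stdio.h>

int main(void)
{
	const char buf[] = "1 0 1  0 0 0xff 0x08  1 1 0xffff 0x0044";
	unsigned int off, type;
	int id, a, b, mask, val, pos = 0, next = 0;

	/* fixed header first; "%n" records how many chars were consumed */
	if (sscanf(buf, "%d %d %d %n", &id, &a, &b, &pos) != 3)
		return 1;

	/* then repeat over the remaining attribute tuples */
	while (buf[pos]) {
		if (sscanf(buf + pos, "%u %u %i %i %n",
			   &off, &type, &mask, &val, &next) != 4)
			break;
		printf("attr: off=%u type=%u mask=0x%x val=0x%x\n",
		       off, type, (unsigned)mask, (unsigned)val);
		pos += next;	/* next is relative to buf + pos */
	}
	return 0;
}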
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 1b4e54d416b0..20b723d6270f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -70,37 +70,24 @@
/**
* enum iwl_bt_coex_flags - flags for BT_COEX command
- * @BT_CH_PRIMARY_EN:
- * @BT_CH_SECONDARY_EN:
- * @BT_NOTIF_COEX_OFF:
* @BT_COEX_MODE_POS:
* @BT_COEX_MODE_MSK:
* @BT_COEX_DISABLE:
* @BT_COEX_2W:
* @BT_COEX_3W:
* @BT_COEX_NW:
- * @BT_USE_DEFAULTS:
- * @BT_SYNC_2_BT_DISABLE:
- * @BT_COEX_CORUNNING_TBL_EN:
+ * @BT_COEX_SYNC2SCO:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
enum iwl_bt_coex_flags {
- BT_CH_PRIMARY_EN = BIT(0),
- BT_CH_SECONDARY_EN = BIT(1),
- BT_NOTIF_COEX_OFF = BIT(2),
BT_COEX_MODE_POS = 3,
BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
- BT_USE_DEFAULTS = BIT(6),
- BT_SYNC_2_BT_DISABLE = BIT(7),
- BT_COEX_CORUNNING_TBL_EN = BIT(8),
- BT_COEX_MPLUT_TBL_EN = BIT(9),
- /* Bit 10 is reserved */
- BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
+ BT_COEX_SYNC2SCO = BIT(7),
};
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 8415ff312d0e..521997669c99 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -231,8 +231,12 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
- /* BIT(11) reserved */
+ IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+ IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
+ IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
+ IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
struct iwl_wowlan_config_cmd {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 884c08725308..cbbcd8e284e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -301,54 +301,65 @@ struct iwl_beacon_filter_cmd {
/* Beacon filtering and beacon abort */
#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_D0I3 20
#define IWL_BF_ENERGY_DELTA_MAX 255
#define IWL_BF_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_D0I3 72
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0
#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_D0I3 112
#define IWL_BF_TEMP_THRESHOLD_MAX 255
#define IWL_BF_TEMP_THRESHOLD_MIN 0
#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
#define IWL_BF_TEMP_FAST_FILTER_MAX 255
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+#define IWL_BF_DEBUG_FLAG_D0I3 0
#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_D0I3 1024
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D0I3 6
#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
-#define IWL_BF_CMD_CONFIG_DEFAULTS \
- .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_energy_delta = \
- cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
- .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
- .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
- .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
- .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
- .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
+#define IWL_BF_CMD_CONFIG(mode) \
+ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
+ .bf_roaming_energy_delta = \
+ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
+ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
+ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
+ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
+ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
+ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
+ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
+#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
+#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
#endif
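
The IWL_BF_CMD_CONFIG(mode) macro above builds a single initializer body and selects either the _DEFAULT or the _D0I3 constant set by token pasting, so the two configurations cannot drift apart structurally. A tiny standalone sketch of the same preprocessor pattern, using made-up names rather than the driver's constants:

#include <stdio.h>

#define THRESH_DEFAULT	5
#define THRESH_D0I3	20

/* "mode" is pasted onto the constant prefix, e.g. PICK_THRESH(_D0I3) */
#define PICK_THRESH(mode)	(THRESH ## mode)

int main(void)
{
	printf("%d %d\n", PICK_THRESH(_DEFAULT), PICK_THRESH(_D0I3)); /* 5 20 */
	return 0;
}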
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 1b60fdff6a56..d63647867262 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -199,11 +199,14 @@ enum iwl_sta_modify_flag {
* @STA_SLEEP_STATE_AWAKE:
* @STA_SLEEP_STATE_PS_POLL:
* @STA_SLEEP_STATE_UAPSD:
+ * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
*/
enum iwl_sta_sleep_flag {
- STA_SLEEP_STATE_AWAKE = 0,
- STA_SLEEP_STATE_PS_POLL = BIT(0),
- STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_AWAKE = 0,
+ STA_SLEEP_STATE_PS_POLL = BIT(0),
+ STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_MOREDATA = BIT(2),
};
/* STA ID and color bits definitions */
@@ -318,13 +321,15 @@ struct iwl_mvm_add_sta_cmd_v5 {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
- * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
- * VER_6 of this command is quite similar to VER_5 except
+ * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
+ * VER_7 of this command is quite similar to VER_5 except
* exclusion of all fields related to the security key installation.
+ * It only differs from VER_6 by the "awake_acs" field that is
+ * reserved and ignored in VER_6.
*/
-struct iwl_mvm_add_sta_cmd_v6 {
+struct iwl_mvm_add_sta_cmd_v7 {
u8 add_modify;
- u8 reserved1;
+ u8 awake_acs;
__le16 tid_disable_tx;
__le32 mac_id_n_color;
u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@@ -342,7 +347,7 @@ struct iwl_mvm_add_sta_cmd_v6 {
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
@@ -432,5 +437,15 @@ struct iwl_mvm_wep_key_cmd {
struct iwl_mvm_wep_key wep_key[0];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+/**
+ * struct iwl_mvm_eosp_notification - EOSP notification from firmware
+ * @remain_frame_count: # of frames remaining, non-zero if SP was cut
+ * short by GO absence
+ * @sta_id: station ID
+ */
+struct iwl_mvm_eosp_notification {
+ __le32 remain_frame_count;
+ __le32 sta_id;
+} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 989d7dbdca6c..a7c88f1402e9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -163,6 +163,7 @@ enum {
TX_ANT_CONFIGURATION_CMD = 0x98,
BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
+ EOSP_NOTIFICATION = 0x9e,
REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
@@ -190,6 +191,7 @@ enum {
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
+ BCAST_FILTER_CMD = 0xcf,
MCAST_FILTER_CMD = 0xd0,
/* D3 commands/notifications */
@@ -197,6 +199,7 @@ enum {
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
OFFLOADS_QUERY_CMD = 0xd5,
REMOTE_WAKE_CONFIG_CMD = 0xd6,
+ D0I3_END_CMD = 0xed,
/* for WoWLAN in particular */
WOWLAN_PATTERNS = 0xe0,
@@ -303,6 +306,7 @@ struct iwl_phy_cfg_cmd {
#define PHY_CFG_RX_CHAIN_B BIT(13)
#define PHY_CFG_RX_CHAIN_C BIT(14)
+#define NVM_MAX_NUM_SECTIONS 11
/* Target of the NVM_ACCESS_CMD */
enum {
@@ -313,14 +317,9 @@ enum {
/* Section types for NVM_ACCESS_CMD */
enum {
- NVM_SECTION_TYPE_HW = 0,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_PAPD,
- NVM_SECTION_TYPE_BT,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
- NVM_SECTION_TYPE_POST_FCS_CALIB,
- NVM_NUM_OF_SECTIONS,
+ NVM_SECTION_TYPE_SW = 1,
+ NVM_SECTION_TYPE_CALIBRATION = 4,
+ NVM_SECTION_TYPE_PRODUCTION = 5,
};
/**
@@ -412,6 +411,35 @@ struct mvm_alive_resp {
__le32 scd_base_ptr; /* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */
+struct mvm_alive_resp_ver2 {
+ __le16 status;
+ __le16 flags;
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 id;
+ u8 api_minor;
+ u8 api_major;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le16 reserved2;
+ __le32 timestamp;
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+ __le32 st_fwrd_addr; /* pointer to Store and forward */
+ __le32 st_fwrd_size;
+ u8 umac_minor; /* UMAC version: minor */
+ u8 umac_major; /* UMAC version: major */
+ __le16 umac_id; /* UMAC version: id */
+ __le32 error_info_addr; /* SRAM address for UMAC error log */
+ __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_2 */
+
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@@ -1159,6 +1187,90 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+#define MAX_BCAST_FILTERS 8
+#define MAX_BCAST_FILTER_ATTRS 2
+
+/**
+ * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
+ * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
+ * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
+ * start of ip payload).
+ */
+enum iwl_mvm_bcast_filter_attr_offset {
+ BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+ BCAST_FILTER_OFFSET_IP_END = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
+ * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
+ * @offset: starting offset of this pattern.
+ * @val: value to match - big endian (MSB is the first
+ * byte to match from offset pos).
+ * @mask: mask to match (big endian).
+ */
+struct iwl_fw_bcast_filter_attr {
+ u8 offset_type;
+ u8 offset;
+ __le16 reserved1;
+ __be32 val;
+ __be32 mask;
+} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
+
+/**
+ * enum iwl_mvm_bcast_filter_frame_type - filter frame type
+ * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
+ * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
+ */
+enum iwl_mvm_bcast_filter_frame_type {
+ BCAST_FILTER_FRAME_TYPE_ALL = 0,
+ BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter - broadcast filter
+ * @discard: discard frame (1) or let it pass (0).
+ * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
+ * @num_attrs: number of valid attributes in this filter.
+ * @attrs: attributes of this filter. a filter is considered matched
+ * only when all its attributes are matched (i.e. AND relationship)
+ */
+struct iwl_fw_bcast_filter {
+ u8 discard;
+ u8 frame_type;
+ u8 num_attrs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+} __packed; /* BCAST_FILTER_S_VER_1 */
+
+/**
+ * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
+ * @default_discard: default action for this mac (discard (1) / pass (0)).
+ * @attached_filters: bitmap of relevant filters for this mac.
+ */
+struct iwl_fw_bcast_mac {
+ u8 default_discard;
+ u8 reserved1;
+ __le16 attached_filters;
+} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
+
+/**
+ * struct iwl_bcast_filter_cmd - broadcast filtering configuration
+ * @disable: enable (0) / disable (1)
+ * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
+ * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
+ * @filters: broadcast filters
+ * @macs: broadcast filtering configuration per-mac
+ */
+struct iwl_bcast_filter_cmd {
+ u8 disable;
+ u8 max_bcast_filters;
+ u8 max_macs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
+ struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
+} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
+
struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c03d39541f9e..bae75b308fc0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -110,18 +110,46 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
struct mvm_alive_resp *palive;
-
- palive = (void *)pkt->data;
-
- mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
- mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
-
- alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
- IWL_DEBUG_FW(mvm,
- "Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive->status), palive->ver_type,
- palive->ver_subtype, palive->flags);
+ struct mvm_alive_resp_ver2 *palive2;
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+
+ mvm->support_umac_log = false;
+ mvm->error_event_table =
+ le32_to_cpu(palive->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+ alive_data->valid = le16_to_cpu(palive->status) ==
+ IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm,
+ "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive->status), palive->ver_type,
+ palive->ver_subtype, palive->flags);
+ } else {
+ palive2 = (void *)pkt->data;
+
+ mvm->support_umac_log = true;
+ mvm->error_event_table =
+ le32_to_cpu(palive2->error_event_table_ptr);
+ mvm->log_event_table =
+ le32_to_cpu(palive2->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
+ mvm->umac_error_event_table =
+ le32_to_cpu(palive2->error_info_addr);
+
+ alive_data->valid = le16_to_cpu(palive2->status) ==
+ IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm,
+ "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive2->status), palive2->ver_type,
+ palive2->ver_subtype, palive2->flags);
+
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ palive2->umac_major, palive2->umac_minor);
+ }
return true;
}
@@ -439,10 +467,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- ret = iwl_mvm_power_update_device_mode(mvm);
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
+ ret = iwl_power_legacy_set_cam_mode(mvm);
+ if (ret)
+ goto error;
+ }
+
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
+ /* allow FW/transport low power modes if not during restart */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index ba723d50939a..5c21aabb40cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -90,6 +90,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
{
struct iwl_mvm_mac_iface_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 min_bi;
/* Skip the interface for which we are trying to assign a tsf_id */
if (vif == data->vif)
@@ -114,42 +115,57 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
switch (data->vif->type) {
case NL80211_IFTYPE_STATION:
/*
- * The new interface is client, so if the existing one
- * we're iterating is an AP, and both interfaces have the
- * same beacon interval, the same TSF should be used to
- * avoid drift between the new client and existing AP,
- * the existing AP will get drift updates from the new
- * client context in this case
+ * The new interface is a client, so if the one we're iterating
+ * is an AP, and the beacon interval of the AP is a multiple or
+ * divisor of the beacon interval of the client, the same TSF
+ * should be used to avoid drift between the new client and
+ * existing AP. The existing AP will get drift updates from the
+ * new client context in this case.
*/
- if (vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if (vif->type != NL80211_IFTYPE_AP ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
+
case NL80211_IFTYPE_AP:
/*
- * The new interface is AP/GO, so in case both interfaces
- * have the same beacon interval, it should get drift
- * updates from an existing client or use the same
- * TSF as an existing GO. There's no drift between
- * TSFs internally but if they used different TSFs
- * then a new client MAC could update one of them
- * and cause drift that way.
+ * The new interface is AP/GO, so if its beacon interval is a
+ * multiple or a divisor of the beacon interval of an existing
+ * interface, it should get drift updates from an existing
+ * client or use the same TSF as an existing GO. There's no
+ * drift between TSFs internally but if they used different
+ * TSFs then a new client MAC could update one of them and
+ * cause drift that way.
*/
- if (vif->type == NL80211_IFTYPE_STATION ||
- vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if ((vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION) ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
default:
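
The reworked TSF-id selection above shares a TSF whenever one interface's beacon interval is an integer multiple of the other's, which is what the (difference % min_bi) == 0 test expresses. A small standalone sketch of that test with worked values (illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool beacon_ints_share_tsf(unsigned int bi_a, unsigned int bi_b)
{
	unsigned int min_bi = bi_a < bi_b ? bi_a : bi_b;
	unsigned int diff = bi_a > bi_b ? bi_a - bi_b : bi_b - bi_a;

	if (!min_bi)
		return false;
	return (diff % min_bi) == 0;
}

int main(void)
{
	/* 100 and 300 TU: (300 - 100) % 100 == 0 -> same TSF */
	printf("%d\n", beacon_ints_share_tsf(100, 300));
	/* 100 and 150 TU: (150 - 100) % 100 != 0 -> separate TSFs */
	printf("%d\n", beacon_ints_share_tsf(100, 150));
	return 0;
}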
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 6bf9766e5982..a49d1ef13f42 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -66,6 +66,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/if_arp.h>
#include <net/mac80211.h>
#include <net/tcp.h>
@@ -128,6 +129,117 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
};
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+/*
+ * Use the reserved field to indicate magic values.
+ * these values will only be used internally by the driver,
+ * and won't make it to the fw (reserved will be 0).
+ * BC_FILTER_MAGIC_IP - configure the val of this attribute to
+ * be the vif's ip address. in case there is not a single
+ * ip address (0, or more than 1), this attribute will
+ * be skipped.
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to
+ * the LSB bytes of the vif's mac address
+ */
+enum {
+ BC_FILTER_MAGIC_NONE = 0,
+ BC_FILTER_MAGIC_IP,
+ BC_FILTER_MAGIC_MAC,
+};
+
+static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
+ {
+ /* arp */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
+ .attrs = {
+ {
+ /* frame type - arp, hw type - ethernet */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header),
+ .val = cpu_to_be32(0x08060001),
+ .mask = cpu_to_be32(0xffffffff),
+ },
+ {
+ /* arp dest ip */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header) + 2 +
+ sizeof(struct arphdr) +
+ ETH_ALEN + sizeof(__be32) +
+ ETH_ALEN,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
+ },
+ },
+ },
+ {
+ /* dhcp offer bcast */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+ .attrs = {
+ {
+ /* udp dest port - 68 (bootp client)*/
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = offsetof(struct udphdr, dest),
+ .val = cpu_to_be32(0x00440000),
+ .mask = cpu_to_be32(0xffff0000),
+ },
+ {
+ /* dhcp - lsb bytes of client hw address */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = 38,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
+ },
+ },
+ },
+ /* last filter must be empty */
+ {},
+};
+#endif
+
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!mvm->trans->cfg->d0i3)
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+ WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_ref(mvm->trans);
+}
+
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!mvm->trans->cfg->d0i3)
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+ WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_unref(mvm->trans);
+}
+
+static void
+iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
+{
+ int i;
+
+ if (!mvm->trans->cfg->d0i3)
+ return;
+
+ for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
+ if (ref == i)
+ continue;
+
+ IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
+ clear_bit(i, mvm->ref_bitmap);
+ iwl_trans_unref(mvm->trans);
+ }
+}
+
static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
int i;
@@ -203,6 +315,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -289,6 +404,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* assign default bcast filtering configuration */
+ mvm->bcast_filters = iwl_mvm_default_bcast_filters;
+#endif
+
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -305,6 +425,9 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
@@ -315,8 +438,16 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
goto drop;
- if (control->sta) {
- if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+ /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
+ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
+ ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control) &&
+ !ieee80211_is_action(hdr->frame_control)))
+ sta = NULL;
+
+ if (sta) {
+ if (iwl_mvm_tx_skb(mvm, skb, sta))
goto drop;
return;
}
@@ -328,6 +459,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
ieee80211_free_txskb(hw, skb);
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* enabled by default */
+ return true;
+}
+
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -347,7 +496,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+ if (!iwl_enable_rx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
@@ -357,7 +506,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
break;
case IEEE80211_AMPDU_TX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+ if (!iwl_enable_tx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
@@ -416,6 +565,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -423,6 +573,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
+ /* cleanup all stale references (scan, roc), but keep the
+ * ucode_down ref until reconfig is complete */
+ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
}
@@ -457,6 +611,9 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
ret);
+ /* allow transport/FW low power modes */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mutex_unlock(&mvm->mutex);
}
@@ -464,9 +621,14 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
mutex_lock(&mvm->mutex);
+
+ /* disallow low power states when the FW is down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
/* async_handlers_wk is now blocked */
/*
@@ -492,14 +654,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
}
-static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = data;
-
- iwl_mvm_power_update_mode(mvm, vif);
-}
-
static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
u16 i;
@@ -567,7 +721,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
- qmask);
+ qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret) {
IWL_ERR(mvm, "Failed to allocate bcast sta\n");
goto out_release;
@@ -581,10 +736,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- iwl_mvm_power_disable(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
+ if (ret)
+ goto out_release;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
if (ret)
goto out_remove_mac;
@@ -643,11 +800,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -736,11 +888,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
@@ -858,6 +1006,156 @@ out:
*total_flags = 0;
}
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+struct iwl_bcast_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_bcast_filter_cmd *cmd;
+ u8 current_filter;
+};
+
+static void
+iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
+ const struct iwl_fw_bcast_filter *in_filter,
+ struct iwl_fw_bcast_filter *out_filter)
+{
+ struct iwl_fw_bcast_filter_attr *attr;
+ int i;
+
+ memcpy(out_filter, in_filter, sizeof(*out_filter));
+
+ for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
+ attr = &out_filter->attrs[i];
+
+ if (!attr->mask)
+ break;
+
+ switch (attr->reserved1) {
+ case cpu_to_le16(BC_FILTER_MAGIC_IP):
+ if (vif->bss_conf.arp_addr_cnt != 1) {
+ attr->mask = 0;
+ continue;
+ }
+
+ attr->val = vif->bss_conf.arp_addr_list[0];
+ break;
+ case cpu_to_le16(BC_FILTER_MAGIC_MAC):
+ attr->val = *(__be32 *)&vif->addr[2];
+ break;
+ default:
+ break;
+ }
+ attr->reserved1 = 0;
+ out_filter->num_attrs++;
+ }
+}
+
+static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_bcast_filter_cmd *cmd = data->cmd;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_fw_bcast_mac *bcast_mac;
+ int i;
+
+ if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
+ return;
+
+ bcast_mac = &cmd->macs[mvmvif->id];
+
+ /* enable filtering only for associated stations */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+ bcast_mac->default_discard = 1;
+
+ /* copy all configured filters */
+ for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
+ /*
+ * Make sure we don't exceed our filters limit.
+ * if there is still a valid filter to be configured,
+ * be on the safe side and just allow bcast for this mac.
+ */
+ if (WARN_ON_ONCE(data->current_filter >=
+ ARRAY_SIZE(cmd->filters))) {
+ bcast_mac->default_discard = 0;
+ bcast_mac->attached_filters = 0;
+ break;
+ }
+
+ iwl_mvm_set_bcast_filter(vif,
+ &mvm->bcast_filters[i],
+ &cmd->filters[data->current_filter]);
+
+ /* skip current filter if it contains no attributes */
+ if (!cmd->filters[data->current_filter].num_attrs)
+ continue;
+
+ /* attach the filter to current mac */
+ bcast_mac->attached_filters |=
+ cpu_to_le16(BIT(data->current_filter));
+
+ data->current_filter++;
+ }
+}
+
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd)
+{
+ struct iwl_bcast_iter_data iter_data = {
+ .mvm = mvm,
+ .cmd = cmd,
+ };
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
+ cmd->max_macs = ARRAY_SIZE(cmd->macs);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* use debugfs filters/macs if override is configured */
+ if (mvm->dbgfs_bcast_filtering.override) {
+ memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
+ sizeof(cmd->filters));
+ memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
+ sizeof(cmd->macs));
+ return true;
+ }
+#endif
+
+ /* if no filters are configured, do nothing */
+ if (!mvm->bcast_filters)
+ return false;
+
+ /* configure and attach these filters for each associated sta vif */
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bcast_filter_iterator, &iter_data);
+
+ return true;
+}
+static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_filter_cmd cmd;
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
+ return 0;
+
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+#else
+static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return 0;
+}
+#endif
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -910,6 +1208,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_sf_update(mvm, vif, false);
iwl_mvm_power_vif_assoc(mvm, vif);
+ if (vif->p2p)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/*
* If update fails - SF might be running in associated
@@ -922,27 +1222,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
if (ret)
IWL_ERR(mvm, "failed to remove AP station\n");
+
+ if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
+
+ if (vif->p2p)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
}
iwl_mvm_recalc_multicast(mvm);
+ iwl_mvm_configure_bcast_filter(mvm, vif);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
- /* Workaround for FW bug, otherwise FW disables device
- * power save upon disassociation
- */
- ret = iwl_mvm_power_update_mode(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update power mode\n");
- }
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
IEEE80211_SMPS_AUTOMATIC);
@@ -955,7 +1253,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
&mvmvif->time_event_data);
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
@@ -969,10 +1267,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
- ret = iwl_mvm_update_beacon_filter(mvm, vif);
+ ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
if (ret)
IWL_ERR(mvm, "failed to update CQM thresholds\n");
}
+
+ if (changes & BSS_CHANGED_ARP_FILTER) {
+ IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+ iwl_mvm_configure_bcast_filter(mvm, vif);
+ }
}
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
@@ -1006,8 +1309,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
- mvmvif->ap_ibss_active = true;
-
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
@@ -1019,7 +1320,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* power updated needs to be done before quotas */
mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
@@ -1029,6 +1330,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
+
iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
@@ -1036,7 +1339,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
out_quota_failed:
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
@@ -1062,6 +1365,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
+
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -1071,7 +1376,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1085,26 +1390,20 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
u32 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
- BSS_CHANGED_HT |
- BSS_CHANGED_BANDWIDTH;
- int ret;
/* Changes will be applied when the AP/IBSS is started */
if (!mvmvif->ap_ibss_active)
return;
- if (changes & ht_change) {
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
- }
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH) &&
+ iwl_mvm_mac_ctxt_changed(mvm, vif))
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
- if (changes & BSS_CHANGED_BEACON) {
- if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
- IWL_WARN(mvm, "Failed updating beacon data\n");
- }
+ if (changes & BSS_CHANGED_BEACON &&
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
}
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -1137,6 +1436,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_notification_wait wait_scan_done;
+ static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
int ret;
if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
@@ -1144,11 +1445,38 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- ret = iwl_mvm_scan_request(mvm, vif, req);
- else
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_SCHED:
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ NULL, NULL);
+ iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_wait_notification(&mvm->notif_wait,
+ &wait_scan_done, HZ);
+ if (ret) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /* iwl_mvm_rx_scan_offload_complete_notif() will be called
+ * soon but will not reset the scan status as it won't be
+ * IWL_MVM_SCAN_SCHED any more since we queue the next scan
+ * immediately (below)
+ */
+ break;
+ case IWL_MVM_SCAN_NONE:
+ break;
+ default:
ret = -EBUSY;
+ goto out;
+ }
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+ ret = iwl_mvm_scan_request(mvm, vif, req);
+ if (ret)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
mutex_unlock(&mvm->mutex);
return ret;
@@ -1168,20 +1496,32 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
static void
iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid,
+ struct ieee80211_sta *sta, u16 tids,
int num_frames,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* TODO: how do we tell the fw to send frames for a specific TID */
+ /* Called when we need to transmit (a) frame(s) from mac80211 */
- /*
- * The fw will send EOSP notification when the last frame will be
- * transmitted.
- */
- iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, false);
+}
+
+static void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from agg queue */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, true);
}
static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1191,11 +1531,25 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int tid;
switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+ continue;
+ if (iwl_mvm_tid_queued(tid_data) == 0)
+ continue;
+ ieee80211_sta_set_buffered(sta, tid, true);
+ }
+ spin_unlock_bh(&mvmsta->lock);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
* queues to this station will not be transmitted. The fw will
@@ -1286,12 +1640,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
- WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -1756,7 +2110,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
* otherwise fw will complain.
*/
mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
/* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
@@ -1774,7 +2128,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -1807,7 +2161,7 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mvmvif->phy_ctxt = NULL;
@@ -1874,8 +2228,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif);
- return iwl_mvm_disable_beacon_filter(mvm, vif);
+ return iwl_mvm_enable_beacon_filter(mvm, vif,
+ CMD_SYNC);
+ return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
}
return -EOPNOTSUPP;
@@ -1914,6 +2269,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index e4ead86f06d6..ebea5f2e2741 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -92,7 +92,6 @@ enum iwl_mvm_tx_fifo {
};
extern struct ieee80211_ops iwl_mvm_hw_ops;
-extern const struct iwl_mvm_power_ops pm_legacy_ops;
extern const struct iwl_mvm_power_ops pm_mac_ops;
/**
@@ -159,20 +158,6 @@ enum iwl_power_scheme {
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
-struct iwl_mvm_power_ops {
- int (*power_update_mode)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
- int (*power_update_device_mode)(struct iwl_mvm *mvm);
- int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
- void (*power_update_binding)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool assign);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- char *buf, int bufsz);
-#endif
-};
-
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
@@ -239,6 +224,17 @@ enum iwl_mvm_smps_type_request {
NUM_IWL_MVM_SMPS_REQ,
};
+enum iwl_mvm_ref_type {
+ IWL_MVM_REF_UCODE_DOWN,
+ IWL_MVM_REF_SCAN,
+ IWL_MVM_REF_ROC,
+ IWL_MVM_REF_P2P_CLIENT,
+ IWL_MVM_REF_AP_IBSS,
+ IWL_MVM_REF_USER,
+
+ IWL_MVM_REF_COUNT,
+};
+
/**
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
@@ -269,7 +265,9 @@ struct iwl_mvm_vif_bf_data {
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
- * interface should get quota etc.
+ * interface should get quota etc.
+ * @low_latency: indicates that this interface is in low-latency mode
+ * (VMACLowLatencyMode)
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
@@ -285,6 +283,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
bool monitor_active;
+ bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@@ -333,14 +332,13 @@ struct iwl_mvm_vif {
struct dentry *dbgfs_slink;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
+ struct iwl_mac_power_cmd mac_pwr_cmd;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
-
- bool pm_prevented;
};
static inline struct iwl_mvm_vif *
@@ -415,6 +413,7 @@ struct iwl_tt_params {
* @ct_kill_exit: worker to exit thermal kill
 * @dynamic_smps: Is dynamic SMPS enabled due to thermal throttling?
 * @tx_backoff: The current thermal throttling tx backoff in uSec.
+ * @min_backoff: The minimal tx backoff due to power restrictions
* @params: Parameters to configure the thermal throttling algorithm.
 * @throttle: Is thermal throttling active?
*/
@@ -422,6 +421,7 @@ struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
bool dynamic_smps;
u32 tx_backoff;
+ u32 min_backoff;
const struct iwl_tt_params *params;
bool throttle;
};
@@ -457,6 +457,8 @@ struct iwl_mvm {
bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
+ u32 umac_error_event_table;
+ bool support_umac_log;
u32 ampdu_ref;
@@ -470,7 +472,7 @@ struct iwl_mvm {
struct iwl_nvm_data *nvm_data;
/* NVM sections */
- struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@@ -494,6 +496,17 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* broadcast filters to configure for each associated station */
+ const struct iwl_fw_bcast_filter *bcast_filters;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ u32 override; /* u32 for debugfs_create_bool */
+ struct iwl_bcast_filter_cmd cmd;
+ } dbgfs_bcast_filtering;
+#endif
+#endif
+
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@@ -526,6 +539,9 @@ struct iwl_mvm {
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ /* A bitmap of reference types taken by the driver. */
+ unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
+
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
@@ -548,6 +564,10 @@ struct iwl_mvm {
#endif
#endif
+ /* d0i3 */
+ u8 d0i3_ap_sta_id;
+ struct work_struct d0i3_exit_work;
+
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
@@ -557,8 +577,6 @@ struct iwl_mvm {
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
- const struct iwl_mvm_power_ops *pm_ops;
-
#ifdef CONFIG_NL80211_TESTMODE
u32 noa_duration;
struct ieee80211_vif *noa_vif;
@@ -572,7 +590,9 @@ struct iwl_mvm {
u8 bound_vif_cnt;
/* Indicate if device power save is allowed */
- bool ps_prevented;
+ bool ps_disabled;
+ /* Indicate if device power management is allowed */
+ bool pm_disabled;
};
/* Extract MVM priv from op_mode and _hw */
@@ -595,6 +615,24 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station has just been removed */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
+
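For reference, a brief sketch of how a caller might use this new helper; the wrapper function below and its name are illustrative assumptions, not part of this patch. The only requirements implied by the helper itself are holding mvm->mutex and tolerating a NULL return.

/* Hypothetical caller: resolve a firmware station id under mvm->mutex. */
static void example_handle_sta_event(struct iwl_mvm *mvm, u8 sta_id)
{
	struct iwl_mvm_sta *mvmsta;

	lockdep_assert_held(&mvm->mutex);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (!mvmsta)
		return;	/* out-of-range id, or the station was just removed */

	/* ... operate on mvmsta (tid_data, lock, ...) ... */
}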
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -661,6 +699,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -773,48 +813,19 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
-/* power managment */
-static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_update_mode(mvm, vif);
-}
-
-static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_disable(mvm, vif);
-}
-
-static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
-{
- if (mvm->pm_ops->power_update_device_mode)
- return mvm->pm_ops->power_update_device_mode(mvm);
- return 0;
-}
+/* power management */
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
-{
- if (mvm->pm_ops->power_update_binding)
- mvm->pm_ops->power_update_binding(mvm, vif, assign);
-}
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- char *buf, int bufsz)
-{
- return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
-}
-#endif
-
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -841,6 +852,10 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
#endif
+/* D0i3 */
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+
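As a rough illustration of how these D0i3 reference helpers are expected to interact with the ref_bitmap added above, here is a minimal sketch; the bodies shown (and the coupling to the transport's runtime PM, hinted at only in comments) are assumptions for illustration, not the patch's actual implementation.

/* Minimal sketch: each reference type may be taken at most once. */
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
	WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
	/* a real implementation would also bump the transport's RPM usage here */
}

void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
	WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
	/* ... and drop the corresponding RPM usage */
}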
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
@@ -854,6 +869,7 @@ u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
@@ -875,25 +891,51 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
{}
#endif
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags);
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable);
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value);
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+ /*
+ * should this consider associated/active/... state?
+ *
+ * Normally low-latency should only be active on interfaces
+ * that are active, but at least with debugfs it can also be
+ * enabled on interfaces that aren't active. However, when
+ * interface aren't active then they aren't added into the
+ * binding, so this has no real impact. For now, just return
+ * the current desired low-latency state.
+ */
+
+ return mvmvif->low_latency;
+}
+
/* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 35b71af78d02..2d5251b3600c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -67,14 +67,6 @@
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
-/* list of NVM sections we are allowed/need to read */
-static const int nvm_to_read[] = {
- NVM_SECTION_TYPE_HW,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
-};
-
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 7000
@@ -240,7 +232,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
/* Checking for required sections */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
+ !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty NVM sections\n");
return NULL;
}
@@ -248,7 +240,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
if (WARN_ON(!mvm->cfg))
return NULL;
- hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+ hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
@@ -367,7 +359,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+ if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
"Invalid NVM section ID %d\n", section_id)) {
ret = -EINVAL;
break;
@@ -415,6 +407,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
int ret, i, section;
u8 *nvm_buffer, *temp;
+ if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ return -EINVAL;
+
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
/* move to External NVM flow */
@@ -422,6 +417,14 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
if (ret)
return ret;
} else {
+ /* list of NVM sections we are allowed/need to read */
+ int nvm_to_read[] = {
+ mvm->cfg->nvm_hw_section_num,
+ NVM_SECTION_TYPE_SW,
+ NVM_SECTION_TYPE_CALIBRATION,
+ NVM_SECTION_TYPE_PRODUCTION,
+ };
+
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
@@ -446,10 +449,6 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
#ifdef CONFIG_IWLWIFI_DEBUGFS
switch (section) {
- case NVM_SECTION_TYPE_HW:
- mvm->nvm_hw_blob.data = temp;
- mvm->nvm_hw_blob.size = ret;
- break;
case NVM_SECTION_TYPE_SW:
mvm->nvm_sw_blob.data = temp;
mvm->nvm_sw_blob.size = ret;
@@ -463,6 +462,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_prod_blob.size = ret;
break;
default:
+ if (section == mvm->cfg->nvm_hw_section_num) {
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ }
WARN(1, "section: %d", section);
}
#endif
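The reason for replacing the hard-coded NVM_SECTION_TYPE_HW with mvm->cfg->nvm_hw_section_num throughout this file is that different device families keep the HW section at different indices. Purely as an illustration (the field is introduced by this series, but the config name and index value below are made up), a family-specific config simply carries the right index:

/* Illustrative only: a per-family config selecting its HW NVM section. */
static const struct iwl_cfg example_family_cfg = {
	.name = "Example device",
	.nvm_hw_section_num = 10,	/* assumed index; older families would use 0 */
	/* ... remaining fields elided ... */
};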
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a3d43de342d7..a46f0b8b0870 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -185,9 +185,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
- ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
struct iwl_rx_handlers {
@@ -222,10 +223,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
+
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
- iwl_mvm_rx_scan_offload_complete_notif, false),
+ iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
false),
@@ -284,9 +287,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_NOTIFICATION),
+ CMD(EOSP_NOTIFICATION),
CMD(REDUCE_TX_POWER_CMD),
CMD(TX_ANT_CONFIGURATION_CMD),
CMD(D3_CONFIG_CMD),
+ CMD(D0I3_END_CMD),
CMD(PROT_OFFLOAD_CONFIG_CMD),
CMD(OFFLOADS_QUERY_CMD),
CMD(REMOTE_WAKE_CONFIG_CMD),
@@ -309,6 +314,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
+ CMD(BCAST_FILTER_CMD),
CMD(REPLY_SF_CFG_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
@@ -320,6 +326,24 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+
+static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
+{
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
+
+ if (!pwr_tx_backoff)
+ return 0;
+
+ while (pwr_tx_backoff->pwr) {
+ if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
+ return pwr_tx_backoff->backoff;
+
+ pwr_tx_backoff++;
+ }
+
+ return 0;
+}
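To make the table walk in calc_min_backoff() concrete, here is a hedged example of the data it consumes; the power limits and backoff values below are invented for illustration and do not come from this patch.

/* Illustrative table only - real entries live in the per-device config. */
static const struct iwl_pwr_tx_backoff example_backoffs[] = {
	{ .pwr = 1600, .backoff = 0 },		/* limits >= 1600 need no backoff */
	{ .pwr = 1300, .backoff = 200 },
	{ .pwr = 900,  .backoff = 800 },
	{ /* .pwr == 0 terminates the walk */ },
};

/*
 * Example: with trans->dflt_pwr_limit == 1100, the loop skips the 1600 and
 * 1300 entries (1100 is below both) and returns 800 from the 900 entry; a
 * limit below 900 falls off the end and calc_min_backoff() returns 0.
 */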
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
@@ -333,6 +357,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
TX_CMD,
};
int err, scan_size;
+ u32 min_backoff;
+
+ /*
+ * We use IWL_MVM_STATION_COUNT to check the validity of the station
+ * index all over the driver - check that its value corresponds to the
+ * array size.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
/********************************
* 1. Allocating and configuring HW data
@@ -373,6 +405,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+ INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
@@ -421,7 +454,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- iwl_mvm_tt_initialize(mvm);
+ min_backoff = calc_min_backoff(trans, cfg);
+ iwl_mvm_tt_initialize(mvm, min_backoff);
/*
* If the NVM exists in an external file,
@@ -462,13 +496,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
- mvm->pm_ops = &pm_mac_ops;
- else
- mvm->pm_ops = &pm_legacy_ops;
-
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+ /* rpm starts with a taken ref. only set the appropriate bit here. */
+ set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
+
return op_mode;
out_unregister:
@@ -508,7 +540,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
iwl_free_nvm_data(mvm->nvm_data);
- for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+ for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
ieee80211_free_hw(mvm->hw);
@@ -703,6 +735,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
iwl_abort_notification_waits(&mvm->notif_wait);
/*
+ * This is a bit racy, but worst case we tell mac80211 about
+ * a stopped/aborted scan when that was already done which
+ * is not a problem. It is necessary to abort any OS scan
+ * here because mac80211 requires having the scan cleared
+ * before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next start() call from mac80211. If restart isn't called
+ * (no fw restart) scan status will stay busy.
+ */
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_NONE:
+ break;
+ case IWL_MVM_SCAN_OS:
+ ieee80211_scan_completed(mvm->hw, true);
+ break;
+ case IWL_MVM_SCAN_SCHED:
+ /* Sched scan will be restarted by mac80211 in restart_hw. */
+ if (!mvm->restart_fw)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
+ }
+
+ /*
* If we're restarting already, don't cycle restarts.
* If INIT fw asserted, it will likely fail again.
* If WoWLAN fw asserted, don't restart either, mac80211
@@ -733,25 +788,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
- /*
- * This is a bit racy, but worst case we tell mac80211 about
- * a stopped/aborted (sched) scan when that was already done
- * which is not a problem. It is necessary to abort any scan
- * here because mac80211 requires having the scan cleared
- * before restarting.
- * We'll reset the scan_status to NONE in restart cleanup in
- * the next start() call from mac80211.
- */
- switch (mvm->scan_status) {
- case IWL_MVM_SCAN_NONE:
- break;
- case IWL_MVM_SCAN_OS:
- ieee80211_scan_completed(mvm->hw, true);
- break;
- case IWL_MVM_SCAN_SCHED:
- ieee80211_sched_scan_stopped(mvm->hw);
- break;
- }
+ /* don't let the transport/FW power down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
if (mvm->restart_fw > 0)
mvm->restart_fw--;
@@ -778,6 +816,163 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm);
}
+struct iwl_d0i3_iter_data {
+ struct iwl_mvm *mvm;
+ u8 ap_sta_id;
+ u8 vif_count;
+};
+
+static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_d0i3_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+
+ IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+
+ /*
+ * on init/association, mvm already configures POWER_TABLE_CMD
+ * and REPLY_MCAST_FILTER_CMD, so currently don't
+ * reconfigure them (we might want to use different
+ * params later on, though).
+ */
+ data->ap_sta_id = mvmvif->ap_sta_id;
+ data->vif_count++;
+}
+
+static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+ int ret;
+ struct iwl_d0i3_iter_data d0i3_iter_data = {
+ .mvm = mvm,
+ };
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+ };
+ struct iwl_d3_manager_config d3_cfg_cmd = {
+ .min_sleep_time = cpu_to_le32(1000),
+ };
+
+ IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_enter_d0i3_iterator,
+ &d0i3_iter_data);
+ if (d0i3_iter_data.vif_count == 1) {
+ mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+ } else {
+ WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
+ flags | CMD_MAKE_TRANS_IDLE,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+}
+
+static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
+
+ IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+}
+
+static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
+ mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ ieee80211_connection_loss(vif);
+}
+
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+ struct iwl_host_cmd get_status_cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status_v6 *status;
+ int ret;
+ u32 disconnection_reasons, wakeup_reasons;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
+ if (ret)
+ goto out;
+
+ if (!get_status_cmd.resp_pkt)
+ goto out;
+
+ status = (void *)get_status_cmd.resp_pkt->data;
+ wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+
+ IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+
+ disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+ if (wakeup_reasons & disconnection_reasons)
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d0i3_disconnect_iter, mvm);
+
+ iwl_free_resp(&get_status_cmd);
+out:
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+ CMD_WAKE_UP_TRANS;
+ int ret;
+
+ IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+ if (ret)
+ goto out;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_exit_d0i3_iterator,
+ mvm);
+out:
+ schedule_work(&mvm->d0i3_exit_work);
+ return ret;
+}
+
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -789,4 +984,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_error = iwl_mvm_nic_error,
.cmd_queue_full = iwl_mvm_cmd_queue_full,
.nic_config = iwl_mvm_nic_config,
+ .enter_d0i3 = iwl_mvm_enter_d0i3,
+ .exit_d0i3 = iwl_mvm_exit_d0i3,
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d9eab3b7bb9f..4da1ea44f39a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -74,39 +74,36 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd)
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 flags)
{
- int ret;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
- sizeof(struct iwl_beacon_filter_cmd), cmd);
-
- if (!ret) {
- IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
- le32_to_cpu(cmd->ba_enable_beacon_abort));
- IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
- le32_to_cpu(cmd->ba_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
- le32_to_cpu(cmd->bf_debug_flag));
- IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
- le32_to_cpu(cmd->bf_enable_beacon_filter));
- IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
- le32_to_cpu(cmd->bf_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_roaming_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
- le32_to_cpu(cmd->bf_roaming_state));
- IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
- le32_to_cpu(cmd->bf_temp_threshold));
- IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_fast_filter));
- IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_slow_filter));
- }
- return ret;
+ IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
+ IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+ le32_to_cpu(cmd->ba_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+ le32_to_cpu(cmd->bf_debug_flag));
+ IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
+ IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+ le32_to_cpu(cmd->bf_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
+
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ sizeof(struct iwl_beacon_filter_cmd), cmd);
}
static
@@ -145,7 +142,7 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
mvmvif->bf_data.ba_enabled = enable;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -301,8 +298,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- mvm->ps_prevented)
+ if (mvm->ps_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -312,7 +308,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
- if (!vif->bss_conf.ps || mvmvif->pm_prevented)
+ if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
+ mvm->pm_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -419,11 +416,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
-static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- int ret;
- bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION)
@@ -435,56 +430,30 @@ static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mac_power_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color));
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+ memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
- iwl_mvm_power_log(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
}
-static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
{
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- force_disable)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ mvm->ps_disabled = true;
+
+ if (mvm->ps_disabled)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -501,11 +470,6 @@ static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
&cmd);
}
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
-{
- return _iwl_mvm_power_update_device(mvm, false);
-}
-
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -544,44 +508,137 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
-static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_power_constraint {
+ struct ieee80211_vif *bf_vif;
+ struct ieee80211_vif *bss_vif;
+ bool pm_disabled;
+ bool ps_disabled;
+};
+
+static void iwl_mvm_power_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = _data;
- int ret;
+ struct iwl_power_constraint *power_iterator = _data;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ /* no BSS power mgmt if we have an active AP */
+ if (mvmvif->ap_ibss_active)
+ power_iterator->pm_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_MONITOR:
+ /* no BSS power mgmt and no device power save */
+ power_iterator->pm_disabled = true;
+ power_iterator->ps_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ /* no BSS power mgmt if we have a P2P client */
+ power_iterator->pm_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ /* we should have only one BSS vif */
+ WARN_ON(power_iterator->bss_vif);
+ power_iterator->bss_vif = vif;
+
+ if (mvmvif->bf_data.bf_enabled &&
+ !WARN_ON(power_iterator->bf_vif))
+ power_iterator->bf_vif = vif;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
+ struct iwl_power_constraint *constraint)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+ constraint->pm_disabled = true;
+ constraint->ps_disabled = true;
+ }
- mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_iterator, constraint);
- ret = iwl_mvm_power_mac_update_mode(mvm, vif);
- WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+ /* TODO: remove this and determine this variable in the iterator */
+ if (mvm->bound_vif_cnt > 1)
+ constraint->pm_disabled = true;
}
-static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_power_constraint constraint = {};
+ bool ba_enable;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
+ iwl_mvm_power_get_global_constraint(mvm, &constraint);
+ mvm->ps_disabled = constraint.ps_disabled;
+ mvm->pm_disabled = constraint.pm_disabled;
+
+ /* don't update device power state unless we add / remove monitor */
if (vif->type == NL80211_IFTYPE_MONITOR) {
- int ret = _iwl_mvm_power_update_device(mvm, assign);
- mvm->ps_prevented = assign;
- WARN_ONCE(ret, "Failed to update power device state\n");
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ return ret;
}
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_binding_iterator,
- mvm);
+ ret = iwl_mvm_power_send_cmd(mvm, vif);
+ if (ret)
+ return ret;
+
+ if (constraint.bss_vif && vif != constraint.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (!constraint.bf_vif)
+ return 0;
+
+ vif = constraint.bf_vif;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
+ !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
+
+ return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (WARN_ON(!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+ mutex_unlock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
@@ -685,32 +742,46 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
}
#endif
-int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 cmd_flags,
+ bool d0i3)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = cpu_to_le32(1),
- };
int ret;
if (mvmvif != mvm->bf_allowed_vif ||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+ if (!d0i3)
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
- if (!ret)
+ /* don't change bf_enabled in case of temporary d0i3 configuration */
+ if (!ret && !d0i3)
mvmvif->bf_data.bf_enabled = true;
return ret;
}
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+}
+
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u32 flags)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -720,7 +791,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -728,23 +799,89 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags)
{
+ int ret;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_power_cmd cmd = {};
- if (!mvmvif->bf_data.bf_enabled)
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- return iwl_mvm_enable_beacon_filter(mvm, vif);
-}
+ if (!vif->bss_conf.assoc)
+ return 0;
-const struct iwl_mvm_power_ops pm_mac_ops = {
- .power_update_mode = iwl_mvm_power_mac_update_mode,
- .power_update_device_mode = iwl_mvm_power_update_device,
- .power_disable = iwl_mvm_power_mac_disable,
- .power_update_binding = _iwl_mvm_power_update_binding,
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (enable) {
+ /* configure skip over dtim up to 300 msec */
+ int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ if (WARN_ON(!dtimper_msec))
+ return 0;
+
+ cmd.flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd.skip_dtim_periods = 300 / dtimper_msec;
+ }
+ iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+ memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
-};
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /* configure beacon filtering */
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (enable) {
+ struct iwl_beacon_filter_cmd cmd_bf = {
+ IWL_BF_CMD_CONFIG_D0I3,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+ ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+ flags, true);
+ } else {
+ if (mvmvif->bf_data.bf_enabled)
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+ else
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ }
+
+ return ret;
+}
+
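A quick worked example of the skip-over-DTIM arithmetic used in iwl_mvm_update_d0i3_power_mode() above; the beacon interval and DTIM period are assumed values, not taken from this patch.

/*
 * Worked example (assumed values): beacon_int = 100 ms, ps_dtim_period = 2.
 *   dtimper_msec      = 2 * 100            = 200 ms per DTIM
 *   skip_dtim_periods = 300 / dtimper_msec = 1
 * i.e. while in D0i3 the firmware may skip at most one DTIM, keeping the
 * sleep window within roughly 300 ms.
 */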
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (!mvmvif->bf_data.bf_enabled) {
+ /* disable beacon filtering explicitly if force is true */
+ if (force)
+ return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return 0;
+ }
+
+ return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+}
+
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
+{
+ struct iwl_powertable_cmd cmd = {
+ .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
deleted file mode 100644
index ef712ae5bc62..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-debug.h"
-#include "mvm.h"
-#include "iwl-modparams.h"
-#include "fw-api-power.h"
-
-#define POWER_KEEP_ALIVE_PERIOD_SEC 25
-
-static void iwl_mvm_power_log(struct iwl_mvm *mvm,
- struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(mvm,
- "Sending power table command for power level %d, flags = 0x%X\n",
- iwlmvm_mod_params.power_scheme,
- le16_to_cpu(cmd->flags));
- IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
- le32_to_cpu(cmd->tx_data_timeout));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
- IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
- le32_to_cpu(cmd->skip_dtim_periods));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
- le32_to_cpu(cmd->lprx_rssi_threshold));
- }
-}
-
-static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd)
-{
- struct ieee80211_hw *hw = mvm->hw;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_channel *chan;
- int dtimper, dtimper_msec;
- int keep_alive;
- bool radar_detect = false;
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- /*
- * Regardless of power management state the driver must set
- * keep alive period. FW will use it for sending keep alive NDPs
- * immediately after association.
- */
- cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
-
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.assoc)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- if (!vif->bss_conf.ps)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
- if (vif->bss_conf.beacon_rate &&
- (vif->bss_conf.beacon_rate->bitrate == 10 ||
- vif->bss_conf.beacon_rate->bitrate == 60)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- cmd->lprx_rssi_threshold =
- cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
- }
-
- dtimper = hw->conf.ps_dtim_period ?: 1;
-
- /* Check if radar detection is required on current channel */
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- WARN_ON(!chanctx_conf);
- if (chanctx_conf) {
- chan = chanctx_conf->def.chan;
- radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
- }
- rcu_read_unlock();
-
- /* Check skip over DTIM conditions */
- if (!radar_detect && (dtimper <= 10) &&
- (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
- mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = cpu_to_le32(3);
- }
-
- /* Check that keep alive period is at least 3 * DTIM */
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * cmd->keep_alive_seconds);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
- cmd->keep_alive_seconds = keep_alive;
-
- if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
- cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- } else {
- cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
- cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
- if (mvmvif->dbgfs_pm.skip_over_dtim)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- else
- cmd->flags &=
- cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
- cmd->rx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
- cmd->tx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
- cmd->skip_dtim_periods =
- cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
- if (mvmvif->dbgfs_pm.lprx_ena)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- else
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
- cmd->lprx_rssi_threshold =
- cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
-#endif /* CONFIG_IWLWIFI_DEBUGFS */
-}
-
-static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- int ret;
- bool ba_enable;
- struct iwl_powertable_cmd cmd = {};
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- /*
- * TODO: The following vif_count verification is temporary condition.
- * Avoid power mode update if more than one interface is currently
- * active. Remove this condition when FW will support power management
- * on multiple MACs.
- */
- IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count > 1)
- return 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
- iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_powertable_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- iwl_mvm_power_log(mvm, &cmd);
-
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
- sizeof(cmd), &cmd);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
-{
- struct iwl_powertable_cmd cmd = {};
- int pos = 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- le32_to_cpu(cmd.skip_dtim_periods));
- pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
- iwlmvm_mod_params.power_scheme);
- pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
- le16_to_cpu(cmd.flags));
- pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
- cmd.keep_alive_seconds);
-
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- le32_to_cpu(cmd.lprx_rssi_threshold));
- }
- return pos;
-}
-#endif
-
-const struct iwl_mvm_power_ops pm_legacy_ops = {
- .power_update_mode = iwl_mvm_power_legacy_update_mode,
- .power_disable = iwl_mvm_power_legacy_disable,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
-#endif
-};
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index ce5db6c4ef7e..06d8429be1fb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -65,9 +65,14 @@
#include "fw-api.h"
#include "mvm.h"
+#define QUOTA_100 IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
+
struct iwl_mvm_quota_iterator_data {
int n_interfaces[MAX_BINDINGS];
int colors[MAX_BINDINGS];
+ int low_latency[MAX_BINDINGS];
+ int n_low_latency_bindings;
struct ieee80211_vif *new_vif;
};
@@ -107,22 +112,29 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
if (vif->bss_conf.assoc)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
if (mvmvif->ap_ibss_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_MONITOR:
if (mvmvif->monitor_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_P2P_DEVICE:
- break;
+ return;
default:
WARN_ON_ONCE(1);
- break;
+ return;
+ }
+
+ data->n_interfaces[id]++;
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+ data->n_low_latency_bindings++;
+ data->low_latency[id] = true;
}
}
@@ -162,12 +174,13 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
- int i, idx, ret, num_active_macs, quota, quota_rem;
+ int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
.new_vif = newvif,
};
+ u32 ll_max_duration;
lockdep_assert_held(&mvm->mutex);
@@ -186,6 +199,21 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
}
+ switch (data.n_low_latency_bindings) {
+ case 0: /* no low latency - use default */
+ ll_max_duration = 0;
+ break;
+ case 1: /* SingleBindingLowLatencyMode */
+ ll_max_duration = IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR;
+ break;
+ case 2: /* DualBindingLowLatencyMode */
+ ll_max_duration = IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR;
+ break;
+ default: /* MultiBindingLowLatencyMode */
+ ll_max_duration = 0;
+ break;
+ }
+
/*
* The FW's scheduling session consists of
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
@@ -197,11 +225,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
num_active_macs += data.n_interfaces[i];
}
- quota = 0;
- quota_rem = 0;
- if (num_active_macs) {
- quota = IWL_MVM_MAX_QUOTA / num_active_macs;
- quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
+ n_non_lowlat = num_active_macs;
+
+ if (data.n_low_latency_bindings == 1) {
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (data.low_latency[i]) {
+ n_non_lowlat -= data.n_interfaces[i];
+ break;
+ }
+ }
+ }
+
+ if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+ /*
+ * Reserve quota for the low latency binding when there are
+ * several data bindings but only a single low latency one.
+ * Split the rest of the quota equally between the other
+ * data interfaces.
+ */
+ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+ quota_rem = QUOTA_100 - n_non_lowlat * quota -
+ QUOTA_LOWLAT_MIN;
+ } else if (num_active_macs) {
+ /*
+ * There are 0 or more than 1 low latency bindings, or all the
+ * data interfaces belong to the single low latency binding.
+ * Split the quota equally between the data interfaces.
+ */
+ quota = QUOTA_100 / num_active_macs;
+ quota_rem = QUOTA_100 % num_active_macs;
+ } else {
+ /* values don't really matter - won't be used */
+ quota = 0;
+ quota_rem = 0;
}
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -211,19 +267,42 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
- if (data.n_interfaces[i] <= 0) {
+ if (data.n_interfaces[i] <= 0)
cmd.quotas[idx].quota = cpu_to_le32(0);
- cmd.quotas[idx].max_duration = cpu_to_le32(0);
- } else {
+ else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+ data.low_latency[i])
+ /*
+ * There is more than one binding, but only one of the
+ * bindings is in low latency. For this case, allocate
+ * the minimal required quota for the low latency
+ * binding.
+ */
+ cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+
+ else
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
+
+ WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
+ "Binding=%d, quota=%u > max=%u\n",
+ idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
+
+ if (data.n_interfaces[i] && !data.low_latency[i])
+ cmd.quotas[idx].max_duration =
+ cpu_to_le32(ll_max_duration);
+ else
cmd.quotas[idx].max_duration = cpu_to_le32(0);
- }
+
idx++;
}
- /* Give the remainder of the session to the first binding */
- le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+ /* Give the remainder of the session to the first data binding */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
+ le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
+ break;
+ }
+ }
iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
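For reference, the quota split performed above is easier to follow in isolation. The following is a minimal stand-alone sketch of the arithmetic, using made-up values for QUOTA_100 and QUOTA_LOWLAT_MIN (the real constants live in the mvm headers) and ignoring the firmware command layout entirely:

#include <stdio.h>

/* Illustrative stand-ins for the real driver constants. */
#define QUOTA_100        128   /* assumption: total schedule fragments */
#define QUOTA_LOWLAT_MIN  16   /* assumption: minimum reserved for low latency */

/*
 * Split the session quota between data interfaces, reserving a fixed
 * minimum for a single low-latency binding when other bindings exist.
 */
static void split_quota(int num_active_macs, int n_lowlat_macs,
                        int n_low_latency_bindings)
{
        int n_non_lowlat = num_active_macs - n_lowlat_macs;
        int quota, quota_rem;

        if (n_low_latency_bindings == 1 && n_non_lowlat) {
                quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
                quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN;
        } else if (num_active_macs) {
                quota = QUOTA_100 / num_active_macs;
                quota_rem = QUOTA_100 % num_active_macs;
        } else {
                quota = quota_rem = 0;
        }

        printf("per-interface quota %d, remainder %d\n", quota, quota_rem);
}

int main(void)
{
        split_quota(3, 1, 1);   /* one low-latency iface, two normal ones */
        split_quota(2, 2, 1);   /* all ifaces on the single low-latency binding */
        return 0;
}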
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 6abf74e1351f..32bb8075121c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -380,49 +380,49 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
/* Expected TpT tables. 4 indexes:
* 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
*/
-static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
{0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
-static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
};
-static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
{0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
{0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
};
-static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
{0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
@@ -905,7 +905,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
rate->bw = RATE_MCS_CHAN_WIDTH_20;
- WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
rate->index > IWL_RATE_MCS_9_INDEX);
rate->index = rs_ht_to_legacy[rate->index];
@@ -1169,12 +1169,12 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->visited_columns = 0;
}
-static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
const struct rs_tx_column *column,
u32 bw)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
column->mode != RS_SISO &&
@@ -1262,9 +1262,8 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -1479,7 +1478,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
const struct rs_tx_column *next_col;
allow_column_func_t allow_func;
u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
- s32 *expected_tpt_tbl;
+ const u16 *expected_tpt_tbl;
s32 tpt, max_expected_tpt;
for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
@@ -2815,8 +2814,8 @@ static void rs_rate_init_stub(void *mvm_r,
struct ieee80211_sta *sta, void *mvm_sta)
{
}
-static struct rate_control_ops rs_mvm_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_mvm_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 7bc6404f6986..3332b396011e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -277,7 +277,7 @@ enum rs_column {
struct iwl_scale_tbl_info {
struct rs_rate rate;
enum rs_column column;
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index a85b60f7e67e..fa3c1393e103 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -129,22 +129,16 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
struct ieee80211_rx_status *rx_status)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
- int rssi_all_band_a, rssi_all_band_b;
- u32 agc_a, agc_b, max_agc;
+ u32 agc_a, agc_b;
u32 val;
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
- max_agc = max_t(u32, agc_a, agc_b);
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
- rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_A_POS;
- rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_B_POS;
/*
* dBm = rssi dB - agc dB - constant.
@@ -364,10 +358,10 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.flag |= RX_FLAG_40MHZ;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status.flag |= RX_FLAG_80MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status.flag |= RX_FLAG_160MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
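As a side note on the RSSI path simplified above, each chain's energy is converted to dBm by subtracting its AGC value and a fixed offset, and the stronger chain is reported (the in-code comment "dBm = rssi dB - agc dB - constant" describes exactly this). A rough stand-alone sketch follows; the offset value here is an assumption, not the driver's real constant:

#include <stdio.h>

#define RSSI_OFFSET 50  /* assumption: stand-in for the driver's constant */

static int max_int(int a, int b)
{
        return a > b ? a : b;
}

/* dBm = rssi dB - agc dB - constant; the strongest chain wins. */
static int calc_rssi_dbm(int rssi_a, int agc_a, int rssi_b, int agc_b)
{
        int rssi_a_dbm = rssi_a - RSSI_OFFSET - agc_a;
        int rssi_b_dbm = rssi_b - RSSI_OFFSET - agc_b;

        return max_int(rssi_a_dbm, rssi_b_dbm);
}

int main(void)
{
        printf("signal: %d dBm\n", calc_rssi_dbm(40, 20, 35, 18));
        return 0;
}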
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 742afc429c94..eba55cc3352d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -408,6 +408,8 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+
return 0;
}
@@ -476,6 +478,7 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
if (iwl_mvm_is_radio_killed(mvm)) {
ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
mvm->scan_status = IWL_MVM_SCAN_NONE;
return;
}
@@ -488,7 +491,7 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
- /* mac80211's state will be cleaned in the fw_restart flow */
+ /* mac80211's state will be cleaned in the nic_restart flow */
goto out_remove_notif;
}
@@ -509,11 +512,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+ /* scan status must be locked for proper checking */
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted");
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ /* might already be something else again, don't reset if so */
+ if (mvm->scan_status == IWL_MVM_SCAN_SCHED)
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_sched_scan_stopped(mvm->hw);
return 0;
@@ -596,6 +604,9 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
* config match list.
*/
for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ /* skip empty SSID matchsets */
+ if (!req->match_sets[i].ssid.ssid_len)
+ continue;
scan->direct_scan[i].id = WLAN_EID_SSID;
scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
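The scheduled-scan fix above simply refuses to turn zero-length SSID matchsets into direct-scan entries. A trimmed-down illustration of that filtering follows; the struct and the PROBE_OPTION_MAX value are simplified stand-ins, and unlike the driver this sketch also compacts the output list:

#include <stdio.h>
#include <string.h>

#define PROBE_OPTION_MAX 20     /* assumption: illustrative cap */

struct match_set {
        char ssid[32];
        size_t ssid_len;
};

/* Copy only non-empty SSIDs into the direct-scan list, up to the cap. */
static int build_direct_scan(const struct match_set *sets, int n_sets,
                             struct match_set *out)
{
        int i, n_out = 0;

        for (i = 0; i < n_sets && i < PROBE_OPTION_MAX; i++) {
                if (!sets[i].ssid_len)
                        continue;       /* skip empty SSID matchsets */
                out[n_out++] = sets[i];
        }

        return n_out;
}

int main(void)
{
        struct match_set in[3] = {
                { "alpha", 5 }, { "", 0 }, { "beta", 4 },
        };
        struct match_set out[3];

        printf("%d SSIDs kept\n", build_direct_scan(in, 3, out));
        return 0;
}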
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 3397f59cd4e4..2677d1c0e1a1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,27 +66,27 @@
#include "sta.h"
#include "rs.h"
-static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
{
memset(cmd_v5, 0, sizeof(*cmd_v5));
- cmd_v5->add_modify = cmd_v6->add_modify;
- cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
- memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
- cmd_v5->sta_id = cmd_v6->sta_id;
- cmd_v5->modify_mask = cmd_v6->modify_mask;
- cmd_v5->station_flags = cmd_v6->station_flags;
- cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v6->assoc_id;
- cmd_v5->beamform_flags = cmd_v6->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+ cmd_v5->add_modify = cmd_v7->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
+ cmd_v5->sta_id = cmd_v7->sta_id;
+ cmd_v5->modify_mask = cmd_v7->modify_mask;
+ cmd_v5->station_flags = cmd_v7->station_flags;
+ cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v7->assoc_id;
+ cmd_v5->beamform_flags = cmd_v7->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
}
static void
@@ -110,7 +110,7 @@ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
}
static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_cmd_v6 *cmd,
+ struct iwl_mvm_add_sta_cmd_v7 *cmd,
int *status)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -119,14 +119,14 @@ static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
cmd, status);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
&cmd_v5, status);
}
static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
- struct iwl_mvm_add_sta_cmd_v6 *cmd)
+ struct iwl_mvm_add_sta_cmd_v7 *cmd)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -134,7 +134,7 @@ static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
sizeof(*cmd), cmd);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
&cmd_v5);
@@ -175,19 +175,30 @@ static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
&sta_cmd);
}
-static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
+ enum nl80211_iftype iftype)
{
int sta_id;
+ u32 reserved_ids = 0;
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
+ /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
+ if (iftype != NL80211_IFTYPE_STATION)
+ reserved_ids = BIT(0);
+
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+ for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+ if (BIT(sta_id) & reserved_ids)
+ continue;
+
if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex)))
return sta_id;
+ }
return IWL_MVM_STATION_COUNT;
}
@@ -196,7 +207,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -312,7 +323,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta_id = iwl_mvm_find_free_sta_id(mvm,
+ ieee80211_vif_type_p2p(vif));
else
sta_id = mvm_sta->sta_id;
@@ -368,7 +380,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -522,6 +534,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ /* clear d0i3_ap_sta_id if no longer relevant */
+ if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
/*
@@ -560,10 +576,10 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask)
+ u32 qmask, enum nl80211_iftype iftype)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
}
@@ -587,13 +603,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd;
+ struct iwl_mvm_add_sta_cmd_v7 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -627,7 +643,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Add the aux station, but without any queues */
- ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
+ NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;
@@ -699,7 +716,8 @@ int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
- ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+ ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret)
return ret;
@@ -735,7 +753,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -794,7 +812,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -833,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
-static const u8 tid_to_ac[] = {
+static const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
@@ -844,6 +862,17 @@ static const u8 tid_to_ac[] = {
IEEE80211_AC_VO,
};
+static const u8 tid_to_ucode_ac[] = {
+ AC_BE,
+ AC_BK,
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VI,
+ AC_VO,
+ AC_VO,
+};
+
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
@@ -874,7 +903,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
/* the new tx queue is still connected to the same mac80211 queue */
- mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+ mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvmsta->lock);
tid_data = &mvmsta->tid_data[tid];
@@ -916,7 +945,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->ssn = 0xffff;
spin_unlock_bh(&mvmsta->lock);
- fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+ fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -1411,7 +1440,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1427,28 +1456,102 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt)
+ u16 cnt, u16 tids, bool more_data,
+ bool agg)
{
- u16 sleep_state_flags =
- (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
- STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
.sleep_tx_count = cpu_to_le16(cnt),
.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
- /*
- * Same modify mask for sleep_tx_count and sleep_state_flags so
- * we must set the sleep_state_flags too.
- */
- .sleep_state_flags = cpu_to_le16(sleep_state_flags),
};
- int ret;
+ int tid, ret;
+ unsigned long _tids = tids;
+
+ /* convert TIDs to ACs - we don't support TSPEC so that's OK
+ * Note that this field is reserved and unused by firmware not
+ * supporting GO uAPSD, so it's safe to always do this.
+ */
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+ cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+ /* If we're releasing frames from aggregation queues then check if all
+ * the queues combined that we're releasing frames from have
+ * - more frames than the service period, in which case more_data
+ * needs to be set
+ * - fewer than 'cnt' frames, in which case we need to adjust the
+ * firmware command (but do that unconditionally)
+ */
+ if (agg) {
+ int remaining = cnt;
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+ struct iwl_mvm_tid_data *tid_data;
+ u16 n_queued;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (WARN(tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
+ "TID %d state is %d\n",
+ tid, tid_data->state)) {
+ spin_unlock_bh(&mvmsta->lock);
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+
+ n_queued = iwl_mvm_tid_queued(tid_data);
+ if (n_queued > remaining) {
+ more_data = true;
+ remaining = 0;
+ break;
+ }
+ remaining -= n_queued;
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
+ if (WARN_ON(cnt - remaining == 0)) {
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+ }
+
+ /* Note: this is ignored by firmware not supporting GO uAPSD */
+ if (more_data)
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
+
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+ mvmsta->next_status_eosp = true;
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
+ } else {
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
+ }
- /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
+
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+ return 0;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (!IS_ERR_OR_NULL(sta))
+ ieee80211_sta_eosp(sta);
+ rcu_read_unlock();
+
+ return 0;
+}
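The reworked iwl_mvm_sta_modify_sleep_tx_count() above does two things: it translates the released TIDs into a firmware AC bitmap, and it clamps the release count to what is actually queued on the aggregation queues, raising more_data if frames remain. A compact sketch of both steps follows; the AC numbering mirrors the tid_to_ucode_ac table added above, but everything else (types, queue bookkeeping) is illustrative only:

#include <stdio.h>

enum { AC_BK, AC_BE, AC_VI, AC_VO };    /* assumption: illustrative ordering */

static const unsigned char tid_to_ucode_ac[8] = {
        AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

/* Build the "awake ACs" bitmap from the TIDs being released. */
static unsigned int tids_to_ac_bitmap(unsigned int tids)
{
        unsigned int acs = 0;
        int tid;

        for (tid = 0; tid < 8; tid++)
                if (tids & (1u << tid))
                        acs |= 1u << tid_to_ucode_ac[tid];

        return acs;
}

/*
 * Clamp the number of frames to release to what is queued on the
 * aggregation queues of the released TIDs; report whether more data
 * remains after the service period.
 */
static int clamp_release(int cnt, const int *queued, unsigned int tids,
                         int *more_data)
{
        int remaining = cnt, tid;

        for (tid = 0; tid < 8; tid++) {
                if (!(tids & (1u << tid)))
                        continue;
                if (queued[tid] > remaining) {
                        *more_data = 1;
                        remaining = 0;
                        break;
                }
                remaining -= queued[tid];
        }

        return cnt - remaining; /* frames the firmware should release */
}

int main(void)
{
        int queued[8] = { 0, 0, 0, 0, 0, 3, 0, 1 };
        int more = 0;
        int rel = clamp_release(2, queued, (1u << 5) | (1u << 7), &more);

        printf("acs=0x%x release=%d more_data=%d\n",
               tids_to_ac_bitmap((1u << 5) | (1u << 7)), rel, more);
        return 0;
}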
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4968d0237dc5..2ed84c421481 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -195,24 +195,33 @@ struct iwl_mvm;
/**
* DOC: AP mode - PS
*
- * When a station is asleep, the fw will set it as "asleep". All the
- * non-aggregation frames to that station will be dropped by the fw
- * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
* AMPDUs are in a separate queue that is stopped by the fw. We just need to
- * let mac80211 know how many frames we have in these queues so that it can
+ * let mac80211 know when there are frames in these queues so that it can
* properly handle trigger frames.
- * When the a trigger frame is received, mac80211 tells the driver to send
- * frames from the AMPDU queues or AC queue depending on which queue are
- * delivery-enabled and what TID has frames to transmit (Note that mac80211 has
- * all the knowledege since all the non-agg frames are buffered / filtered, and
- * the driver tells mac80211 about agg frames). The driver needs to tell the fw
- * to let frames out even if the station is asleep. This is done by
- * %iwl_mvm_sta_modify_sleep_tx_count.
- * When we receive a frame from that station with PM bit unset, the
- * driver needs to let the fw know that this station isn't alseep any more.
- * This is done by %iwl_mvm_sta_modify_ps_wake.
- *
- * TODO - EOSP handling
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. Note that mac80211 has all the knowledge since all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames. The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this (and all other cases) the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
*/
/**
@@ -261,6 +270,12 @@ struct iwl_mvm_tid_data {
u16 ssn;
};
+static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
+{
+ return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+ tid_data->next_reclaimed);
+}
+
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -269,7 +284,11 @@ struct iwl_mvm_tid_data {
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
+ * by debugfs.
* @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ * we need to signal the EOSP
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
@@ -287,7 +306,9 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
+ bool bt_reduced_txpower_dbg;
bool bt_reduced_txpower;
+ bool next_status_eosp;
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
@@ -345,6 +366,10 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start);
@@ -359,7 +384,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask);
+ u32 qmask, enum nl80211_iftype iftype);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta);
int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -375,7 +400,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt);
+ u16 cnt, u16 tids, bool more_data,
+ bool agg);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain);
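The new iwl_mvm_tid_queued() helper counts pending frames as the distance between the next 802.11 sequence number the driver will assign and the next one it expects to reclaim, modulo the 12-bit sequence space; ieee80211_sn_sub() in mac80211 is essentially a masked subtraction. A tiny sketch of that arithmetic:

#include <stdio.h>

#define SEQ_MASK 0x0fff         /* 802.11 sequence numbers are 12 bits wide */

/* Modular distance between two sequence numbers, as in ieee80211_sn_sub(). */
static unsigned int sn_sub(unsigned int a, unsigned int b)
{
        return (a - b) & SEQ_MASK;
}

int main(void)
{
        /* e.g. next to assign = 5, next to reclaim = 4093: 8 frames in flight */
        printf("queued = %u\n", sn_sub(5, 4093));
        return 0;
}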
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index b4c2abaa297b..e145dd41e85e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -126,6 +126,7 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
* in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
/*
* Of course, our status bit is just as racy as mac80211, so in
@@ -210,6 +211,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw);
}
} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 3afa6b6bf835..7a99fa361954 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -403,7 +403,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
}
}
-static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
{
struct iwl_host_cmd cmd = {
.id = REPLY_THERMAL_MNG_BACKOFF,
@@ -412,6 +412,8 @@ static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
.flags = CMD_SYNC,
};
+ backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
backoff);
@@ -534,7 +536,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.support_tx_backoff = true,
};
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@@ -546,6 +548,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
tt->params = &iwl7000_tt_params;
tt->throttle = false;
+ tt->min_backoff = min_backoff;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4df12fa9d336..74d60bf27750 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -377,6 +377,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* From now on, we cannot access info->control */
+ /*
+ * we handle that entirely ourselves -- for uAPSD the firmware
+ * will always send a notification, and for PS-Poll responses
+ * we'll notify mac80211 when getting frame status
+ */
+ info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
spin_lock(&mvmsta->lock);
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
@@ -437,6 +444,17 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
+ if ((tid_data->state == IWL_AGG_ON ||
+ tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ iwl_mvm_tid_queued(tid_data) == 0) {
+ /*
+ * Now that this aggregation queue is empty tell mac80211 so it
+ * knows we no longer have frames buffered for the station on
+ * this TID (for the TIM bitmap calculation).
+ */
+ ieee80211_sta_set_buffered(sta, tid, false);
+ }
+
if (tid_data->ssn != tid_data->next_reclaimed)
return;
@@ -680,6 +698,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
+
+ if (mvmsta->next_status_eosp) {
+ mvmsta->next_status_eosp = false;
+ ieee80211_sta_eosp(sta);
+ }
} else {
mvmsta = NULL;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 86989df69356..1493f79e67e0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -376,9 +376,67 @@ struct iwl_error_event_table {
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed;
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 line; /* source code line of error */
+ u32 umac_ver; /* umac version */
+} __packed;
+
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_umac_error_event_table table;
+ u32 base;
+
+ base = mvm->umac_error_event_table;
+
+ if (base < 0x800000 || base >= 0x80C000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->cur_ucode == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc);
+ IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver);
+}
+
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
@@ -394,7 +452,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
base = mvm->fw->inst_errlog_ptr;
}
- if (base < 0x800000 || base >= 0x80C000) {
+ if (base < 0x800000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
@@ -453,13 +511,17 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+ if (mvm->support_umac_log)
+ iwl_mvm_dump_umac_error_log(mvm);
}
void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
{
const struct fw_img *img;
int ofs, len = 0;
- u8 *buf;
+ int i;
+ __le32 *buf;
if (!mvm->ucode_loaded)
return;
@@ -473,7 +535,12 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
return;
iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
- iwl_print_hex_error(mvm->trans, buf, len);
+ len = len >> 2;
+ for (i = 0; i < len; i++) {
+ IWL_ERR(mvm, "0x%08X\n", le32_to_cpu(buf[i]));
+ /* Add a small delay to let syslog catch up */
+ udelay(10);
+ }
kfree(buf);
}
@@ -516,7 +583,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_request)
{
struct iwl_mvm_vif *mvmvif;
- enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ enum ieee80211_smps_mode smps_mode;
int i;
lockdep_assert_held(&mvm->mutex);
@@ -525,6 +592,11 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
return;
+ if (vif->type == NL80211_IFTYPE_AP)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
@@ -538,3 +610,22 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int res;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvmvif->low_latency = value;
+
+ res = iwl_mvm_update_quotas(mvm, NULL);
+ if (res)
+ return res;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ return iwl_mvm_power_update_mac(mvm, vif);
+}
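The SRAM dump change above drops the hex-dump helper in favour of printing the image as little-endian 32-bit words (with a small udelay() so syslog can keep up). A minimal user-space sketch of the same transformation; buffer contents are made up and a little-endian host is assumed in place of le32_to_cpu():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Print a byte buffer as consecutive 32-bit words (little-endian host
 * assumed, standing in for the driver's le32_to_cpu()).
 */
static void dump_words(const uint8_t *buf, size_t len)
{
        size_t i, words = len >> 2;

        for (i = 0; i < words; i++) {
                uint32_t word;

                memcpy(&word, buf + 4 * i, sizeof(word));
                printf("0x%08" PRIX32 "\n", word);
        }
}

int main(void)
{
        const uint8_t sram[8] = { 0x78, 0x56, 0x34, 0x12,
                                  0xef, 0xbe, 0xad, 0xde };

        dump_words(sram, sizeof(sram));
        return 0;
}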
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f47bcbe2945a..0f52e961a5a5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -66,6 +66,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-drv.h"
@@ -390,12 +391,91 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#ifdef CONFIG_ACPI
+#define SPL_METHOD "SPLC"
+#define SPL_DOMAINTYPE_MODULE BIT(0)
+#define SPL_DOMAINTYPE_WIFI BIT(1)
+#define SPL_DOMAINTYPE_WIGIG BIT(2)
+#define SPL_DOMAINTYPE_RFEM BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+ union acpi_object *limits, *domain_type, *power_limit;
+
+ if (splx->type != ACPI_TYPE_PACKAGE ||
+ splx->package.count != 2 ||
+ splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splx->package.elements[0].integer.value != 0) {
+ IWL_ERR(trans, "Unsupported splx structure");
+ return 0;
+ }
+
+ limits = &splx->package.elements[1];
+ if (limits->type != ACPI_TYPE_PACKAGE ||
+ limits->package.count < 2 ||
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ IWL_ERR(trans, "Invalid limits element");
+ return 0;
+ }
+
+ domain_type = &limits->package.elements[0];
+ power_limit = &limits->package.elements[1];
+ if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+ return 0;
+ }
+
+ return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+ struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+ if (!pxsx_handle) {
+ IWL_ERR(trans, "Could not retrieve root port ACPI handle");
+ return;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_INFO(trans, "SPL method not found");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+ return;
+ }
+
+ trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+ trans->dflt_pwr_limit);
+ kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -420,6 +500,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
}
+ set_dflt_pwr_limit(iwl_trans, pdev);
+
/* register transport layer debugfs here */
ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08c23d497a02..41f684deff97 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -802,10 +802,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta;
- lockdep_assert_held(&trans_pcie->irq_lock);
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
trace_iwlwifi_dev_irq(trans->dev);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f9507807b486..84d471299e5a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -89,6 +89,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -132,8 +133,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
@@ -203,19 +205,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
/*
* Enable DMA clock and wait for it to stabilize.
*
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
- /* Disable L1-Active */
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- /* Clear the interrupt in APMG if the NIC is in RFKILL */
- iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
set_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -273,7 +279,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
- iwl_pcie_set_pwr(trans, false);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -435,78 +442,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
int shift_param;
- u32 address;
- int ret = 0;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
if (cpu == 1) {
shift_param = 0;
- address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+ *first_ucode_section = 0;
} else {
shift_param = 16;
- address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+ (*first_ucode_section)++;
}
- /* set CPU to started */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_STARTED << shift_param,
- 1);
-
- /* set last complete descriptor number */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
- << shift_param,
- 1);
-
- /* set last loaded block */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
- << shift_param,
- 1);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ if (i == (*first_ucode_section) + 1)
+ /* set CPU to started */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_HDRS_LOADING_COMPLETED
+ << shift_param);
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
/* image loading complete */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_COMPLETED
- << shift_param,
- 1);
-
- /* set FH_TCSR_0_REG */
- iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
-
- /* verify image verification started */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_TIME_OUT);
- if (ret < 0) {
- IWL_ERR(trans, "secure boot process didn't start\n");
- return ret;
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
}
- /* wait for image verification to complete */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_TIME_OUT);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
- if (ret < 0) {
- IWL_ERR(trans, "Time out on secure boot process\n");
- return ret;
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
}
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ (LMPM_CPU_UCODE_LOADING_COMPLETED |
+ LMPM_CPU_HDRS_LOADING_COMPLETED |
+ LMPM_CPU_UCODE_LOADING_STARTED) <<
+ shift_param);
+
+ *first_ucode_section = last_read_idx;
+
return 0;
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
- int i, ret = 0;
+ int ret = 0;
+ int first_ucode_section;
IWL_DEBUG_FW(trans,
"working with %s image\n",
@@ -518,53 +553,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* configure the ucode to be ready to get the secured image */
if (image->is_secure) {
/* set secure boot inspector addresses */
- iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
- iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
-
- /* release CPU1 reset if secure inspector image burned in OTP */
- iwl_write32(trans, CSR_RESET, 0);
- }
-
- /* load to FW the binary sections of CPU1 */
- IWL_DEBUG_INFO(trans, "Loading CPU1\n");
- for (i = 0;
- i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_CODE_ADDR,
+ LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_DATA_ADDR,
+ LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+ /* set CPU1 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+ LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- }
- /* configure the ucode to start secure process on CPU1 */
- if (image->is_secure) {
- /* config CPU1 to start secure protocol */
- ret = iwl_pcie_secure_set(trans, 1);
+ } else {
+ /* load to FW the binary Non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- } else {
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
}
if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
/* load to FW the binary sections of CPU2 */
- IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
- for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i < IWL_UCODE_SECTION_MAX; i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
- if (ret)
- return ret;
- }
+ if (image->is_secure)
+ ret = iwl_pcie_load_cpu_secured_sections(
+ trans, image, 2,
+ &first_ucode_section);
+ else
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
- if (image->is_secure) {
- /* set CPU2 for secure protocol */
- ret = iwl_pcie_secure_set(trans, 2);
- if (ret)
- return ret;
+ /* release CPU reset */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+ else
+ iwl_write32(trans, CSR_RESET, 0);
+
+ if (image->is_secure) {
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
}
}
@@ -1407,16 +1457,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_dump_fh(trans, &buf);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
+ ssize_t ret;
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
return ret;
}
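The new per-CPU section loaders above walk the firmware image, stop at an empty section or at the CPU1/CPU2 separator marker, and hand the index back so the next CPU resumes after it. A stripped-down sketch of that walk; the separator value matches the patch, everything else is illustrative:

#include <stdio.h>

#define SEPARATOR_SECTION 0xFFFFCCCCu   /* CPU1_CPU2_SEPARATOR_SECTION */
#define MAX_SECTIONS 16

struct fw_section {
        const void *data;
        unsigned int offset;
};

/*
 * "Load" the sections belonging to one CPU, starting at *first, and leave
 * *first pointing at the separator/empty slot so the caller can continue
 * with the next CPU.
 */
static int load_cpu_sections(const struct fw_section *sec, int cpu, int *first)
{
        int i;

        if (cpu == 1)
                *first = 0;
        else
                (*first)++;     /* skip the separator consumed by CPU1 */

        for (i = *first; i < MAX_SECTIONS; i++) {
                if (!sec[i].data || sec[i].offset == SEPARATOR_SECTION)
                        break;
                printf("CPU%d: loading section %d\n", cpu, i);
        }

        *first = i;
        return 0;
}

int main(void)
{
        static const char blob[] = "fw";
        struct fw_section sec[MAX_SECTIONS] = {
                { blob, 0x0 }, { blob, 0x100 },         /* CPU1 sections */
                { NULL, SEPARATOR_SECTION },            /* separator */
                { blob, 0x0 }, { blob, 0x80 },          /* CPU2 sections */
        };
        int first = 0;

        load_cpu_sections(sec, 1, &first);
        load_cpu_sections(sec, 2, &first);
        return 0;
}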
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3d549008b3e2..254126447c68 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -705,8 +705,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 32f75007a825..2d72a6b4b93e 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1766,7 +1766,8 @@ static void lbs_join_post(struct lbs_private *priv,
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
- cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
+ GFP_KERNEL);
/* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
priv->connect_status = LBS_CONNECTED;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 69d4c3179d04..f7e3562542fe 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -57,6 +57,10 @@ static bool rctbl = false;
module_param(rctbl, bool, 0444);
MODULE_PARM_DESC(rctbl, "Handle rate control table");
+static bool support_p2p_device = true;
+module_param(support_p2p_device, bool, 0444);
+MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -335,7 +339,8 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
#endif
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) },
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+ /* must be last, see hwsim_if_comb */
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
};
static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
@@ -345,6 +350,27 @@ static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
static const struct ieee80211_iface_combination hwsim_if_comb[] = {
{
.limits = hwsim_if_limits,
+ /* remove the last entry which is P2P_DEVICE */
+ .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ },
+ {
+ .limits = hwsim_if_dfs_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ }
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
+ {
+ .limits = hwsim_if_limits,
.n_limits = ARRAY_SIZE(hwsim_if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
@@ -451,7 +477,7 @@ static struct genl_family hwsim_genl_family = {
/* MAC80211_HWSIM netlink policy */
-static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
+static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
@@ -468,6 +494,7 @@ static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -1035,32 +1062,6 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
ack = true;
rx_status.mactime = now + data2->tsf_offset;
-#if 0
- /*
- * Don't enable this code by default as the OUI 00:00:00
- * is registered to Xerox so we shouldn't use it here, it
- * might find its way into pcap files.
- * Note that this code requires the headroom in the SKB
- * that was allocated earlier.
- */
- rx_status.vendor_radiotap_oui[0] = 0x00;
- rx_status.vendor_radiotap_oui[1] = 0x00;
- rx_status.vendor_radiotap_oui[2] = 0x00;
- rx_status.vendor_radiotap_subns = 127;
- /*
- * Radiotap vendor namespaces can (and should) also be
- * split into fields by using the standard radiotap
- * presence bitmap mechanism. Use just BIT(0) here for
- * the presence bitmap.
- */
- rx_status.vendor_radiotap_bitmap = BIT(0);
- /* We have 8 bytes of (dummy) data */
- rx_status.vendor_radiotap_len = 8;
- /* For testing, also require it to be aligned */
- rx_status.vendor_radiotap_align = 8;
- /* push the data */
- memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
-#endif
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -1275,6 +1276,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
+
+ if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ ieee80211_csa_finish(vif);
}
static enum hrtimer_restart
@@ -1936,7 +1940,7 @@ static struct ieee80211_ops mac80211_hwsim_mchan_ops;
static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_regdomain *regd,
- bool reg_strict)
+ bool reg_strict, bool p2p_device)
{
int err;
u8 addr[ETH_ALEN];
@@ -2000,8 +2004,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
/* For channels > 1 DFS is not allowed */
hw->wiphy->n_iface_combinations = 1;
hw->wiphy->iface_combinations = &data->if_combination;
- data->if_combination = hwsim_if_comb[0];
data->if_combination.num_different_channels = data->channels;
+ if (p2p_device)
+ data->if_combination = hwsim_if_comb_p2p_dev[0];
+ else
+ data->if_combination = hwsim_if_comb[0];
+ } else if (p2p_device) {
+ hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(hwsim_if_comb_p2p_dev);
} else {
hw->wiphy->iface_combinations = hwsim_if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
@@ -2017,8 +2028,10 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ if (p2p_device)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
@@ -2027,13 +2040,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
+ IEEE80211_HW_CHANCTX_STA_CSA;
if (rctbl)
hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_UAPSD;
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
/* ask mac80211 to reserve space for magic */
@@ -2407,6 +2422,7 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
const char *alpha2 = NULL;
const struct ieee80211_regdomain *regd = NULL;
bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
if (info->attrs[HWSIM_ATTR_CHANNELS])
chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
@@ -2422,7 +2438,8 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+ return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
+ p2p_device);
}
static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -2640,7 +2657,8 @@ static int __init init_mac80211_hwsim(void)
}
err = mac80211_hwsim_create_radio(channels, reg_alpha2,
- regd, reg_strict);
+ regd, reg_strict,
+ support_p2p_device);
if (err < 0)
goto out_free_radios;
}
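The support_p2p_device value passed into mac80211_hwsim_create_radio() above presumably comes from a module parameter declared earlier in the file; that declaration is outside this hunk, so the following is only a sketch of what it likely looks like (the name is taken from the call site, everything else is assumed):

/* Hypothetical sketch -- the real declaration is not part of this hunk. */
static bool support_p2p_device = true;
module_param(support_p2p_device, bool, 0444);
MODULE_PARM_DESC(support_p2p_device, "Support P2P Device virtual interface");

With such a parameter, P2P Device support can be disabled for all simulated radios at load time, while the new HWSIM_ATTR_SUPPORT_P2P_DEVICE flag controls it per radio created over netlink.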
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 2747cce5a269..6e72996ec8c1 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -107,6 +107,7 @@ enum {
* (nla string, length 2)
* @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
* @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
+ * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag)
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -126,6 +127,7 @@ enum {
HWSIM_ATTR_REG_HINT_ALPHA2,
HWSIM_ATTR_REG_CUSTOM_REG,
HWSIM_ATTR_REG_STRICT_REG,
+ HWSIM_ATTR_SUPPORT_P2P_DEVICE,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5e0eec4d71c7..bb43251c18f2 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -23,6 +23,31 @@
#include "main.h"
#include "11ac.h"
+/* Tables of the MCS map to the highest data rate (in Mbps) supported
+ * for long GI.
+ */
+static const u16 max_rate_lgi_80MHZ[8][3] = {
+ {0x124, 0x15F, 0x186}, /* NSS = 1 */
+ {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
+ {0x36D, 0x41D, 0x492}, /* NSS = 3 */
+ {0x492, 0x57C, 0x618}, /* NSS = 4 */
+ {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
+ {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
+ {0x924, 0xAF8, 0xC30} /* NSS = 8 */
+};
+
+static const u16 max_rate_lgi_160MHZ[8][3] = {
+ {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
+ {0x492, 0x57C, 0x618}, /* NSS = 2 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
+ {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
+ {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
+ {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
+ {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
+ {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
+};
+
/* This function converts the 2-bit MCS map to the highest long GI
* VHT data rate.
*/
@@ -30,33 +55,10 @@ static u16
mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
u8 bands, u16 mcs_map)
{
- u8 i, nss, max_mcs;
+ u8 i, nss, mcs;
u16 max_rate = 0;
u32 usr_vht_cap_info = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- /* tables of the MCS map to the highest data rate (in Mbps)
- * supported for long GI
- */
- u16 max_rate_lgi_80MHZ[8][3] = {
- {0x124, 0x15F, 0x186}, /* NSS = 1 */
- {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
- {0x36D, 0x41D, 0x492}, /* NSS = 3 */
- {0x492, 0x57C, 0x618}, /* NSS = 4 */
- {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
- {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
- {0x924, 0xAF8, 0xC30} /* NSS = 8 */
- };
- u16 max_rate_lgi_160MHZ[8][3] = {
- {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
- {0x492, 0x57C, 0x618}, /* NSS = 2 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
- {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
- {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
- {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
- {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
- {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
- };
if (bands & BAND_AAC)
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
@@ -64,29 +66,29 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
/* find the max NSS supported */
- nss = 0;
- for (i = 0; i < 8; i++) {
- max_mcs = (mcs_map >> (2 * i)) & 0x3;
- if (max_mcs < 3)
+ nss = 1;
+ for (i = 1; i <= 8; i++) {
+ mcs = GET_VHTNSSMCS(mcs_map, i);
+ if (mcs < IEEE80211_VHT_MCS_NOT_SUPPORTED)
nss = i;
}
- max_mcs = (mcs_map >> (2 * nss)) & 0x3;
+ mcs = GET_VHTNSSMCS(mcs_map, nss);
- /* if max_mcs is 3, nss must be 0 (SS = 1). Thus, max mcs is MCS 9 */
- if (max_mcs >= 3)
- max_mcs = 2;
+ /* mcs == 3 means even NSS 1 is unsupported; default to MCS 0-9 */
+ if (mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) {
/* support 160 MHz */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS6 */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs - 1];
} else {
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS3 */
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs - 1];
}
return max_rate;
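The loop above relies on the 2-bit-per-NSS layout of the VHT MCS map (the GET_VHTNSSMCS macro shown in the fw.h hunk later in this series). A self-contained sketch of the same extraction, handy for checking the arithmetic by hand; the map value 0xfffa is just an example of a typical two-stream device:

#include <stdint.h>
#include <stdio.h>

/* Same layout as the driver macro: two bits per NSS, NSS counted from 1. */
#define VHT_MCS_NOT_SUPPORTED	3
#define GET_NSS_MCS(map, nss)	(((map) >> (2 * ((nss) - 1))) & 0x3)

int main(void)
{
	uint16_t mcs_map = 0xfffa;	/* NSS 1-2 support MCS 0-9, the rest none */
	int nss, max_nss = 1;

	for (nss = 1; nss <= 8; nss++)
		if (GET_NSS_MCS(mcs_map, nss) < VHT_MCS_NOT_SUPPORTED)
			max_nss = nss;

	/* prints "max NSS = 2, mcs entry = 2" (2 == MCS 0-9 supported) */
	printf("max NSS = %d, mcs entry = %d\n",
	       max_nss, GET_NSS_MCS(mcs_map, max_nss));
	return 0;
}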
@@ -94,21 +96,20 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
static void
mwifiex_fill_vht_cap_info(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
if (bands & BAND_A)
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a);
else
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg);
}
-static void
-mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
u16 mcs_map_user, mcs_map_resp, mcs_map_result;
@@ -119,46 +120,48 @@ mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
/* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */
mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.rx_highest = cpu_to_le16(tmp);
/* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */
mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.tx_highest = cpu_to_le16(tmp);
return;
}
@@ -193,7 +196,8 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
sizeof(struct ieee_types_header),
le16_to_cpu(vht_cap->header.len));
- mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap->vht_cap,
+ bss_desc->bss_band);
*buffer += sizeof(*vht_cap);
ret_len += sizeof(*vht_cap);
}
@@ -300,3 +304,81 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv)
return;
}
+
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+ vht_oper = bss_desc->bcn_vht_oper;
+
+ if (!bss_desc->bcn_vht_cap || !vht_oper)
+ return false;
+
+ if (vht_oper->chan_width == IEEE80211_VHT_CHANWIDTH_USE_HT)
+ return false;
+
+ return true;
+}
+
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw)
+{
+ u8 center_freq_idx = 0;
+
+ if (band & BAND_AAC) {
+ switch (pri_chan) {
+ case 36:
+ case 40:
+ case 44:
+ case 48:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 42;
+ break;
+ case 52:
+ case 56:
+ case 60:
+ case 64:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 58;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 50;
+ break;
+ case 100:
+ case 104:
+ case 108:
+ case 112:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 106;
+ break;
+ case 116:
+ case 120:
+ case 124:
+ case 128:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 122;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 114;
+ break;
+ case 132:
+ case 136:
+ case 140:
+ case 144:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 138;
+ break;
+ case 149:
+ case 153:
+ case 157:
+ case 161:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 155;
+ break;
+ default:
+ center_freq_idx = 42;
+ }
+ }
+
+ return center_freq_idx;
+}
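As a quick sanity check of the new helper, the mapping follows the usual 5 GHz VHT segment layout; a hedged usage sketch (priv is assumed to be a station private structure, and BAND_AAC is the band bit from ioctl.h in this series):

/* Illustration only: primary channel 149 in an 80 MHz VHT BSS maps to
 * center-frequency index 155 (the 149/153/157/161 segment); a primary
 * channel of 60 with 160 MHz bandwidth maps to index 50.
 */
u8 idx80  = mwifiex_get_center_freq_index(priv, BAND_AAC, 149,
					   IEEE80211_VHT_CHANWIDTH_80MHZ);
u8 idx160 = mwifiex_get_center_freq_index(priv, BAND_AAC, 60,
					   IEEE80211_VHT_CHANWIDTH_160MHZ);
/* idx80 == 155, idx160 == 50 */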
diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h
index 7c2c69b5b3eb..0b02cb6cfcb4 100644
--- a/drivers/net/wireless/mwifiex/11ac.h
+++ b/drivers/net/wireless/mwifiex/11ac.h
@@ -40,4 +40,6 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u16 cmd_action,
struct mwifiex_11ac_vht_cfg *cfg);
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands);
#endif /* _MWIFIEX_11AC_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 6261f8c53d44..37677af8d2fc 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -34,22 +34,26 @@
*
* The RD responder bit is cleared in the extended capability header.
*/
-void
-mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
- struct mwifiex_ie_types_htcap *ht_cap)
+int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
+ struct ieee80211_ht_cap *ht_cap)
{
- uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info);
+ uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
struct ieee80211_supported_band *sband =
priv->wdev->wiphy->bands[radio_type];
- ht_cap->ht_cap.ampdu_params_info =
+ if (WARN_ON_ONCE(!sband)) {
+ dev_err(priv->adapter->dev, "Invalid radio type!\n");
+ return -EINVAL;
+ }
+
+ ht_cap->ampdu_params_info =
(sband->ht_cap.ampdu_factor &
IEEE80211_HT_AMPDU_PARM_FACTOR) |
((sband->ht_cap.ampdu_density <<
IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
- memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs,
+ memcpy((u8 *)&ht_cap->mcs, &sband->ht_cap.mcs,
sizeof(sband->ht_cap.mcs));
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
@@ -57,13 +61,18 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
(priv->adapter->sec_chan_offset !=
IEEE80211_HT_PARAM_CHA_SEC_NONE)))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
- SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
+ SETHT_MCS32(ht_cap->mcs.rx_mask);
/* Clear RD responder bit */
ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER;
- ht_cap->ht_cap.cap_info = cpu_to_le16(sband->ht_cap.cap);
- ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+ ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
+ ht_cap->extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+
+ if (ISSUPP_BEAMFORMING(priv->adapter->hw_dot_11n_dev_cap))
+ ht_cap->tx_BF_cap_info = cpu_to_le32(MWIFIEX_DEF_11N_TX_BF_CAP);
+
+ return 0;
}
/*
@@ -312,7 +321,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
sizeof(struct ieee_types_header),
le16_to_cpu(ht_cap->header.len));
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
*buffer += sizeof(struct mwifiex_ie_types_htcap);
ret_len += sizeof(struct mwifiex_ie_types_htcap);
@@ -528,16 +537,32 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
{
struct host_cmd_ds_11n_addba_req add_ba_req;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ peer_mac);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
+ }
+
add_ba_req.block_ack_param_set = cpu_to_le16(
(u16) ((tid << BLOCKACKPARAM_TID_POS) |
- (priv->add_ba_param.
- tx_win_size << BLOCKACKPARAM_WINSIZE_POS) |
- IMMEDIATE_BLOCK_ACK));
+ tx_win_size << BLOCKACKPARAM_WINSIZE_POS |
+ IMMEDIATE_BLOCK_ACK));
add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
++dialog_tok;
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 375db01442bf..12bb6acbdd58 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -34,8 +34,8 @@ int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
u8 **buffer);
-void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
- struct mwifiex_ie_types_htcap *);
+int mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
+ struct ieee80211_ht_cap *);
int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
u16 action, int *htcap_cfg);
void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
@@ -64,14 +64,32 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
-/*
- * This function checks whether AMPDU is allowed or not for a particular TID.
- */
static inline u8
-mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid)
+mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
{
- return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED)
- ? true : false);
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ptr->ra);
+
+ if (unlikely(!node))
+ return false;
+
+ return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
+}
+
+/* This function checks whether AMPDU is allowed or not for a particular TID. */
+static inline u8
+mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+ } else {
+ if (ptr->tdls_link)
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+
+ return (priv->aggr_prio_tbl[tid].ampdu_ap !=
+ BA_STREAM_NOT_ALLOWED) ? true : false;
+ }
}
/*
@@ -165,4 +183,14 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
return node->is_11n_enabled;
}
+
+static inline u8
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+{
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
+ if (node)
+ return node->is_11n_enabled;
+
+ return false;
+}
#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index ada809f576fe..b361257fb65e 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -290,7 +290,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
last_seq = node->rx_seq[tid];
}
} else {
- last_seq = priv->rx_seq[tid];
+ node = mwifiex_get_sta_entry(priv, ta);
+ if (node)
+ last_seq = node->rx_seq[tid];
+ else
+ last_seq = priv->rx_seq[tid];
}
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
@@ -358,10 +362,28 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
*cmd_addba_req)
{
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv,
+ cmd_addba_req->peer_mac_addr);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ cmd_addba_req->peer_mac_addr);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
+ }
+
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
@@ -378,8 +400,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
/* We do not support AMSDU inside AMPDU, hence reset the bit */
block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
- block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
- BLOCKACKPARAM_WINSIZE_POS);
+ block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
& IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index a42a506fd32b..2aa208ffbe23 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -41,6 +41,7 @@ mwifiex-y += uap_txrx.o
mwifiex-y += cfg80211.o
mwifiex-y += ethtool.o
mwifiex-y += 11h.o
+mwifiex-y += tdls.o
mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8bfc07cd330e..436ba437a4ba 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1416,9 +1416,6 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
-
- if (params->chandef.width > NL80211_CHAN_WIDTH_40)
- config_bands |= BAND_GAC;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
@@ -1583,8 +1580,9 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
* the function notifies the CFG802.11 subsystem of the new BSS connection.
*/
static int
-mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
- u8 *bssid, int mode, struct ieee80211_channel *channel,
+mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
+ const u8 *ssid, const u8 *bssid, int mode,
+ struct ieee80211_channel *channel,
struct cfg80211_connect_params *sme, bool privacy)
{
struct cfg80211_ssid req_ssid;
@@ -1881,7 +1879,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
params->privacy);
done:
if (!ret) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
+ params->chandef.chan, GFP_KERNEL);
dev_dbg(priv->adapter->dev,
"info: joined/created adhoc network with bssid"
" %pM successfully\n", priv->cfg_bssid);
@@ -2595,6 +2594,170 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
}
+/* cfg80211 ops handler for tdls_mgmt.
+ * Function prepares TDLS action frame packets and forwards them to FW
+ */
+static int
+mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int ret;
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Request to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Response to %pM status_code=%d\n",
+ peer, status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_CONFIRM:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Confirm to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_TEARDOWN:
+ dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
+ peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Request to %pM\n", peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Response to %pM\n", peer);
+ ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ default:
+ dev_warn(priv->adapter->dev,
+ "Unknown TDLS mgmt/action frame %pM\n", peer);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation action)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ dev_dbg(priv->adapter->dev,
+ "TDLS peer=%pM, oper=%d\n", peer, action);
+
+ switch (action) {
+ case NL80211_TDLS_ENABLE_LINK:
+ action = MWIFIEX_TDLS_ENABLE_LINK;
+ break;
+ case NL80211_TDLS_DISABLE_LINK:
+ action = MWIFIEX_TDLS_DISABLE_LINK;
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ /* shouldn't happen! */
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: teardown from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_SETUP:
+ /* shouldn't happen! */
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: setup from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* shouldn't happen! */
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: discovery from driver not supported\n");
+ return -EINVAL;
+ default:
+ dev_err(priv->adapter->dev,
+ "tdls_oper: operation not supported\n");
+ return -ENOTSUPP;
+ }
+
+ return mwifiex_tdls_oper(priv, peer, action);
+}
+
+static int
+mwifiex_cfg80211_add_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK);
+}
+
+static int
+mwifiex_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ int ret;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ /* we support the change_station handler only for TDLS peers */
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ priv->sta_params = params;
+
+ ret = mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CONFIG_LINK);
+ priv->sta_params = NULL;
+
+ return ret;
+}
+
/* station cfg80211 operations */
static struct cfg80211_ops mwifiex_cfg80211_ops = {
.add_virtual_intf = mwifiex_add_virtual_intf,
@@ -2630,6 +2793,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_wakeup = mwifiex_cfg80211_set_wakeup,
#endif
.set_coalesce = mwifiex_cfg80211_set_coalesce,
+ .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
+ .tdls_oper = mwifiex_cfg80211_tdls_oper,
+ .add_station = mwifiex_cfg80211_add_station,
+ .change_station = mwifiex_cfg80211_change_station,
};
#ifdef CONFIG_PM
@@ -2715,6 +2882,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+ if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
wiphy->regulatory_flags |=
REGULATORY_CUSTOM_REG |
REGULATORY_STRICT_REG;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 9eefacbc844b..2c3226bf86f8 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,95 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
+/* For every mcs_rate row, the first 8 entries are for a 1x1 stream,
+ * and all 16 entries are for a 2x2 stream.
+ */
+static const u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+};
+
+/* AC rates */
+static const u16 ac_mcs_rate_nss1[8][10] = {
+ /* LG 160M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 160M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 80M */
+ { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
+ 0x249, 0x2BE, 0x30C },
+
+ /* SG 80M */
+ { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
+ 0x28A, 0x30C, 0x363 },
+
+ /* LG 40M */
+ { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
+ 0x10E, 0x144, 0x168 },
+
+ /* SG 40M */
+ { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
+ 0x12C, 0x168, 0x190 },
+
+ /* LG 20M */
+ { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
+
+ /* SG 20M */
+ { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
+};
+
+/* NSS2 note: each value in the table is twice the actual rate */
+static const u16 ac_mcs_rate_nss2[8][10] = {
+ /* LG 160M */
+ { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
+ 0x924, 0xAF8, 0xC30 },
+
+ /* SG 160M */
+ { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
+ 0xA28, 0xC30, 0xD8B },
+
+ /* LG 80M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 80M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 40M */
+ { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
+ 0x21C, 0x288, 0x2D0 },
+
+ /* SG 40M */
+ { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
+ 0x258, 0x2D0, 0x320 },
+
+ /* LG 20M */
+ { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
+ 0x138, 0x00 },
+
+ /* SG 20M */
+ { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
+ 0x15B, 0x00 },
+};
+
struct region_code_mapping {
u8 code;
u8 region[IEEE80211_COUNTRY_STRING_LEN];
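For orientation, the HT table above is indexed as mcs_rate[GI/bandwidth row][MCS index], and the stored values appear to be the rate in 500 kbps units (e.g. 0x6c = 108 for MCS 3 at 40 MHz long GI, i.e. 54 Mbps). A small lookup sketch, assuming the row ordering given in the comments above:

/* Sketch: row 0 = LGI 40M, 1 = SGI 40M, 2 = LGI 20M, 3 = SGI 20M. */
enum { ROW_LGI_40M, ROW_SGI_40M, ROW_LGI_20M, ROW_SGI_20M };

static u32 ht_rate_kbps(u8 row, u8 mcs_index)
{
	if (row > ROW_SGI_20M || mcs_index > 15)
		return 0;
	return mcs_rate[row][mcs_index] * 500;
}
/* ht_rate_kbps(ROW_LGI_40M, 3) == 54000, i.e. MCS 3, 40 MHz, long GI */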
@@ -109,95 +198,6 @@ u8 *mwifiex_11d_code_2_region(u8 code)
u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /*
- * For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
- /* AC rates */
- u16 ac_mcs_rate_nss1[8][10] = {
- /* LG 160M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 160M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 80M */
- { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
- 0x249, 0x2BE, 0x30C },
-
- /* SG 80M */
- { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
- 0x28A, 0x30C, 0x363 },
-
- /* LG 40M */
- { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
- 0x10E, 0x144, 0x168 },
-
- /* SG 40M */
- { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
- 0x12C, 0x168, 0x190 },
-
- /* LG 20M */
- { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
-
- /* SG 20M */
- { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
- };
- /* NSS2 note: the value in the table is 2 multiplier of the actual
- * rate
- */
- u16 ac_mcs_rate_nss2[8][10] = {
- /* LG 160M */
- { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
- 0x924, 0xAF8, 0xC30 },
-
- /* SG 160M */
- { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
- 0xA28, 0xC30, 0xD8B },
-
- /* LG 80M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 80M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 40M */
- { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
- 0x21C, 0x288, 0x2D0 },
-
- /* SG 40M */
- { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
- 0x258, 0x2D0, 0x320 },
-
- /* LG 20M */
- { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
- 0x138, 0x00 },
-
- /* SG 20M */
- { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
- 0x15B, 0x00 },
- };
u32 rate = 0;
u8 mcs_index = 0;
u8 bw = 0;
@@ -252,26 +252,6 @@ u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /* For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
u32 mcs_num_supp =
(priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
@@ -458,7 +438,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
break;
case BAND_G:
case BAND_G | BAND_GN:
- case BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_g\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_g,
@@ -469,10 +448,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_B:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
- case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN |
- BAND_AAC | BAND_GAC:
case BAND_B | BAND_G | BAND_GN:
- case BAND_B | BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_bg\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_bg,
@@ -496,7 +472,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
sizeof(supported_rates_a));
break;
case BAND_GN:
- case BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_n\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_n,
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1ddc8b2e3722..21544602043c 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -595,7 +595,8 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
}
/* Send command */
- if (cmd_no == HostCmd_CMD_802_11_SCAN) {
+ if (cmd_no == HostCmd_CMD_802_11_SCAN ||
+ cmd_no == HostCmd_CMD_802_11_SCAN_EXT) {
mwifiex_queue_scan_cmd(priv, cmd_node);
} else {
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
@@ -1454,7 +1455,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
{
struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
struct mwifiex_adapter *adapter = priv->adapter;
- int i;
+ struct mwifiex_ie_types_header *tlv;
+ struct hw_spec_fw_api_rev *api_rev;
+ u16 resp_size, api_id;
+ int i, left_len, parsed_len = 0;
adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
@@ -1498,8 +1502,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
/* Copy 11AC cap */
adapter->hw_dot_11ac_dev_cap =
le32_to_cpu(hw_spec->dot_11ac_dev_cap);
- adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap;
- adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap;
+ adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
+ adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
/* Copy 11AC mcs */
adapter->hw_dot_11ac_mcs_support =
@@ -1510,6 +1516,46 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->is_hw_11ac_capable = false;
}
+ resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
+ if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
+ /* we have variable HW SPEC information */
+ left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
+ while (left_len > sizeof(struct mwifiex_ie_types_header)) {
+ tlv = (void *)&hw_spec->tlvs + parsed_len;
+ switch (le16_to_cpu(tlv->type)) {
+ case TLV_TYPE_FW_API_REV:
+ api_rev = (struct hw_spec_fw_api_rev *)tlv;
+ api_id = le16_to_cpu(api_rev->api_id);
+ switch (api_id) {
+ case KEY_API_VER_ID:
+ adapter->fw_key_api_major_ver =
+ api_rev->major_ver;
+ adapter->fw_key_api_minor_ver =
+ api_rev->minor_ver;
+ dev_dbg(adapter->dev,
+ "fw_key_api v%d.%d\n",
+ adapter->fw_key_api_major_ver,
+ adapter->fw_key_api_minor_ver);
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown FW api_id: %d\n",
+ api_id);
+ break;
+ }
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown GET_HW_SPEC TLV type: %#x\n",
+ le16_to_cpu(tlv->type));
+ break;
+ }
+ parsed_len += le16_to_cpu(tlv->len) +
+ sizeof(struct mwifiex_ie_types_header);
+ left_len -= parsed_len;
+ }
+ }
+
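The new TLV walk consumes one TLV per iteration; a generic, self-contained sketch of this pattern, using only the mwifiex_ie_types_header layout shown elsewhere in this series (each step should shrink the remaining length by exactly one header plus payload):

/* Generic sketch of walking little-endian type/len TLVs in a response buffer. */
static void sketch_walk_tlvs(struct mwifiex_adapter *adapter,
			     u8 *buf, int left_len)
{
	struct mwifiex_ie_types_header *tlv;
	int tlv_len;

	while (left_len > (int)sizeof(*tlv)) {
		tlv = (struct mwifiex_ie_types_header *)buf;
		tlv_len = le16_to_cpu(tlv->len) + sizeof(*tlv);
		if (tlv_len > left_len)		/* malformed TLV, stop */
			break;
		dev_dbg(adapter->dev, "TLV type %#x, len %d\n",
			le16_to_cpu(tlv->type), le16_to_cpu(tlv->len));
		buf += tlv_len;			/* consume exactly one TLV */
		left_len -= tlv_len;
	}
}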
dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
adapter->fw_release_number);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 3a21bd03d6db..e7b3e16e5d34 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -75,10 +75,16 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
+#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
+#define MWIFIEX_TDLS_DISABLE_LINK 0x00
+#define MWIFIEX_TDLS_ENABLE_LINK 0x01
+#define MWIFIEX_TDLS_CREATE_LINK 0x02
+#define MWIFIEX_TDLS_CONFIG_LINK 0x03
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -92,6 +98,23 @@ enum mwifiex_bss_role {
MWIFIEX_BSS_ROLE_ANY = 0xff,
};
+enum mwifiex_tdls_status {
+ TDLS_NOT_SETUP = 0,
+ TDLS_SETUP_INPROGRESS,
+ TDLS_SETUP_COMPLETE,
+ TDLS_SETUP_FAILURE,
+ TDLS_LINK_TEARDOWN,
+};
+
+enum mwifiex_tdls_error_code {
+ TDLS_ERR_NO_ERROR = 0,
+ TDLS_ERR_INTERNAL_ERROR,
+ TDLS_ERR_MAX_LINKS_EST,
+ TDLS_ERR_LINK_EXISTS,
+ TDLS_ERR_LINK_NONEXISTENT,
+ TDLS_ERR_PEER_STA_UNREACHABLE = 25,
+};
+
#define BSS_ROLE_BIT_MASK BIT(0)
#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5fa932d5f905..aa8abef58349 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -50,21 +50,23 @@ struct tx_packet_hdr {
#define HOSTCMD_SUPPORTED_RATES 14
#define N_SUPPORTED_RATES 3
#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \
- BAND_AN | BAND_GAC | BAND_AAC)
+ BAND_AN | BAND_AAC)
#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \
- BIT(12) | BIT(13))
+ BIT(13))
#define IS_SUPPORT_MULTI_BANDS(adapter) \
(adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
-/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14
- * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and bit
- * bit 14 for AAC, in order to be compatible with the band capability
- * defined in the driver after right shift of 8 bits.
+/* bit 13: 11ac BAND_AAC
+ * bit 12: reserved for lab testing, will be reused for BAND_AN
+ * bit 11: 11n BAND_GN
+ * bit 10: 11a BAND_A
+ * bit 9: 11g BAND_G
+ * bit 8: 11b BAND_B
+ * Map these bits to band capability by right shifting 8 bits.
*/
#define GET_FW_DEFAULT_BANDS(adapter) \
- (((((adapter->fw_cap_info & 0x3000) << 1) | \
- (adapter->fw_cap_info & ~0xF000)) >> 8) & \
+ (((adapter->fw_cap_info & 0x2f00) >> 8) & \
ALL_802_11_BANDS)
#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
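The simplified GET_FW_DEFAULT_BANDS now maps fw_cap_info bits 8-13 directly onto the band flags by a right shift of 8. A worked example using the enum values from ioctl.h later in this series:

/* fw_cap_info advertising 11b/g/a, 11n (2.4 GHz) and 11ac:
 *   bits 8,9,10,11,13 set            -> fw_cap_info & 0x2f00 = 0x2f00
 *   0x2f00 >> 8                      -> 0x2f
 *   0x2f & ALL_802_11_BANDS (0x3f)   -> BAND_B | BAND_G | BAND_A |
 *                                       BAND_GN | BAND_AAC
 */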
@@ -77,12 +79,21 @@ enum KEY_TYPE_ID {
KEY_TYPE_ID_WAPI,
KEY_TYPE_ID_AES_CMAC,
};
+
+#define WPA_PN_SIZE 8
+#define KEY_PARAMS_FIXED_LEN 10
+#define KEY_INDEX_MASK 0xf
+#define FW_KEY_API_VER_MAJOR_V2 2
+
#define KEY_MCAST BIT(0)
#define KEY_UNICAST BIT(1)
#define KEY_ENABLED BIT(2)
+#define KEY_DEFAULT BIT(3)
+#define KEY_TX_KEY BIT(4)
+#define KEY_RX_KEY BIT(5)
#define KEY_IGTK BIT(10)
-#define WAPI_KEY_LEN 50
+#define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
#define MAX_POLL_TRIES 100
#define MAX_FIRMWARE_POLL_TRIES 100
@@ -130,6 +141,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
+#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
@@ -144,6 +156,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
+#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86)
+#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87)
#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
@@ -154,6 +168,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
+#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -176,11 +192,14 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
+#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
IEEE80211_HT_CAP_SM_PS)
+#define MWIFIEX_DEF_11N_TX_BF_CAP 0x09E1E008
+
#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
/* dev_cap bitmap
@@ -204,6 +223,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
+#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
/* httxcfg bitmap
* 0 reserved
@@ -226,17 +246,24 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
/* HW_SPEC fw_cap_info */
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & BIT(13))
#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
(2 * (nss - 1)))
-#define NO_NSS_SUPPORT 0x3
-
#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
+/* Clear SU beamformer, MU beamformer, MU beamformee and
+ * sounding dimensions bits
+ */
+#define MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK \
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | \
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
+
#define MOD_CLASS_HR_DSSS 0x03
#define MOD_CLASS_OFDM 0x07
#define MOD_CLASS_HT 0x08
@@ -295,10 +322,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define HostCmd_CMD_802_11_SCAN_EXT 0x0107
#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
+#define HostCmd_CMD_TDLS_OPER 0x0122
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -440,6 +469,7 @@ enum P2P_MODES {
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
+#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_ID_MASK 0xffff
@@ -468,6 +498,10 @@ enum P2P_MODES {
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
+#define ACT_TDLS_DELETE 0x00
+#define ACT_TDLS_CREATE 0x01
+#define ACT_TDLS_CONFIG 0x02
+
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -480,6 +514,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
+#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
struct txpd {
u8 bss_type;
@@ -676,6 +711,56 @@ struct mwifiex_cmac_param {
u8 key[WLAN_KEY_LEN_AES_CMAC];
} __packed;
+struct mwifiex_wep_param {
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_WEP104];
+} __packed;
+
+struct mwifiex_tkip_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_TKIP];
+} __packed;
+
+struct mwifiex_aes_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_CCMP];
+} __packed;
+
+struct mwifiex_wapi_param {
+ u8 pn[PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_SMS4];
+} __packed;
+
+struct mwifiex_cmac_aes_param {
+ u8 ipn[IGTK_PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
+struct mwifiex_ie_type_key_param_set_v2 {
+ __le16 type;
+ __le16 len;
+ u8 mac_addr[ETH_ALEN];
+ u8 key_idx;
+ u8 key_type;
+ __le16 key_info;
+ union {
+ struct mwifiex_wep_param wep;
+ struct mwifiex_tkip_param tkip;
+ struct mwifiex_aes_param aes;
+ struct mwifiex_wapi_param wapi;
+ struct mwifiex_cmac_aes_param cmac_aes;
+ } key_params;
+} __packed;
+
+struct host_cmd_ds_802_11_key_material_v2 {
+ __le16 action;
+ struct mwifiex_ie_type_key_param_set_v2 key_param_set;
+} __packed;
+
struct host_cmd_ds_802_11_key_material {
__le16 action;
struct mwifiex_ie_type_key_param_set key_param_set;
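One plausible way the new v2 key TLV could be populated for a pairwise CCMP key is sketched below. The length calculation (fixed KEY_PARAMS_FIXED_LEN header plus the chosen union member) and the key_info flag combination are assumptions, not spelled out in this hunk:

/* Hedged sketch only -- field usage beyond the struct layout is assumed. */
static void sketch_fill_ccmp_key(struct mwifiex_ie_type_key_param_set_v2 *set,
				 const u8 *peer_mac, const u8 *key, u16 key_len)
{
	if (key_len > WLAN_KEY_LEN_CCMP)
		return;

	set->type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
	set->len = cpu_to_le16(KEY_PARAMS_FIXED_LEN +
			       sizeof(struct mwifiex_aes_param));
	memcpy(set->mac_addr, peer_mac, ETH_ALEN);
	set->key_idx = 0;
	set->key_type = KEY_TYPE_ID_AES;
	set->key_info = cpu_to_le16(KEY_ENABLED | KEY_UNICAST |
				    KEY_TX_KEY | KEY_RX_KEY);
	set->key_params.aes.key_len = cpu_to_le16(key_len);
	memcpy(set->key_params.aes.key, key, key_len);
}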
@@ -727,6 +812,17 @@ struct host_cmd_ds_802_11_ps_mode_enh {
} params;
} __packed;
+enum FW_API_VER_ID {
+ KEY_API_VER_ID = 1,
+};
+
+struct hw_spec_fw_api_rev {
+ struct mwifiex_ie_types_header header;
+ __le16 api_id;
+ u8 major_ver;
+ u8 minor_ver;
+} __packed;
+
struct host_cmd_ds_get_hw_spec {
__le16 hw_if_version;
__le16 version;
@@ -748,6 +844,7 @@ struct host_cmd_ds_get_hw_spec {
__le32 reserved_6;
__le32 dot_11ac_dev_cap;
__le32 dot_11ac_mcs_support;
+ u8 tlvs[0];
} __packed;
struct host_cmd_ds_802_11_rssi_info {
@@ -1047,14 +1144,28 @@ struct host_cmd_ds_rf_ant_siso {
__le16 ant_mode;
};
-struct mwifiex_bcn_param {
- u8 bssid[ETH_ALEN];
- u8 rssi;
+struct host_cmd_ds_tdls_oper {
+ __le16 tdls_action;
+ __le16 reason;
+ u8 peer_mac[ETH_ALEN];
+} __packed;
+
+struct mwifiex_fixed_bcn_param {
__le64 timestamp;
__le16 beacon_period;
__le16 cap_info_bitmap;
} __packed;
+struct mwifiex_event_scan_result {
+ __le16 event_id;
+ u8 bss_index;
+ u8 bss_type;
+ u8 more_event;
+ u8 reserved[3];
+ __le16 buf_size;
+ u8 num_of_set;
+} __packed;
+
#define MWIFIEX_USER_SCAN_CHAN_MAX 50
#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
@@ -1124,6 +1235,28 @@ struct host_cmd_ds_802_11_scan_rsp {
u8 bss_desc_and_tlv_buffer[1];
} __packed;
+struct host_cmd_ds_802_11_scan_ext {
+ u32 reserved;
+ u8 tlv_buffer[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_rsp {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+ u8 frame_body[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_info {
+ struct mwifiex_ie_types_header header;
+ __le16 rssi;
+ __le16 anpi;
+ u8 cca_busy_fraction;
+ u8 radio_type;
+ u8 channel;
+ u8 reserved;
+ __le64 tsf;
+} __packed;
+
struct host_cmd_ds_802_11_bg_scan_query {
u8 flush;
} __packed;
@@ -1296,6 +1429,11 @@ struct mwifiex_ie_types_vhtcap {
struct ieee80211_vht_cap vht_cap;
} __packed;
+struct mwifiex_ie_types_aid {
+ struct mwifiex_ie_types_header header;
+ __le16 aid;
+} __packed;
+
struct mwifiex_ie_types_oper_mode_ntf {
struct mwifiex_ie_types_header header;
u8 oper_mode;
@@ -1331,6 +1469,11 @@ struct mwifiex_ie_types_extcap {
u8 ext_capab[0];
} __packed;
+struct mwifiex_ie_types_qos_info {
+ struct mwifiex_ie_types_header header;
+ u8 qos_info;
+} __packed;
+
struct host_cmd_ds_mac_reg_access {
__le16 action;
__le16 offset;
@@ -1441,6 +1584,11 @@ struct host_cmd_tlv_rates {
u8 rates[0];
} __packed;
+struct mwifiex_ie_types_bssid_list {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+} __packed;
+
struct host_cmd_tlv_bcast_ssid {
struct mwifiex_ie_types_header header;
u8 bcast_ctl;
@@ -1634,6 +1782,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
struct host_cmd_ds_802_11_scan scan;
+ struct host_cmd_ds_802_11_scan_ext ext_scan;
struct host_cmd_ds_802_11_scan_rsp scan_resp;
struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
@@ -1653,6 +1802,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_11n_cfg htcfg;
struct host_cmd_ds_wmm_get_status get_wmm_status;
struct host_cmd_ds_802_11_key_material key_material;
+ struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
struct host_cmd_ds_version_ext verext;
struct host_cmd_ds_mgmt_frame_reg reg_mask;
struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1671,6 +1821,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+ struct host_cmd_ds_tdls_oper tdls_oper;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 1d0a817f2bf0..a4cd2cb066ed 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_expire_time = 0;
priv->del_list_idx = 0;
priv->hs2_enabled = false;
+ memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -281,6 +282,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
adapter->empty_tx_q_cnt = 0;
+ adapter->ext_scan = true;
+ adapter->fw_key_api_major_ver = 0;
+ adapter->fw_key_api_minor_ver = 0;
}
/*
@@ -450,6 +454,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
INIT_LIST_HEAD(&priv->sta_list);
+ skb_queue_head_init(&priv->tdls_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 00a95f4c6a6c..5974642f38b1 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -60,8 +60,7 @@ enum {
BAND_A = 4,
BAND_GN = 8,
BAND_AN = 16,
- BAND_GAC = 32,
- BAND_AAC = 64,
+ BAND_AAC = 32,
};
#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
@@ -86,6 +85,10 @@ struct wep_key {
#define BAND_CONFIG_A 0x01
#define MWIFIEX_SUPPORTED_RATES 14
#define MWIFIEX_SUPPORTED_RATES_EXT 32
+#define MWIFIEX_TDLS_SUPPORTED_RATES 8
+#define MWIFIEX_TDLS_DEF_QOS_CAPAB 0xf
+#define MWIFIEX_PRIO_BK 2
+#define MWIFIEX_PRIO_VI 5
struct mwifiex_uap_bss_param {
u8 channel;
@@ -233,7 +236,10 @@ struct mwifiex_ds_encrypt_key {
u8 mac_addr[ETH_ALEN];
u32 is_wapi_key;
u8 pn[PN_LEN]; /* packet number */
+ u8 pn_len;
u8 is_igtk_key;
+ u8 is_current_wep_key;
+ u8 is_rx_seq_valid;
};
struct mwifiex_power_cfg {
@@ -432,4 +438,16 @@ struct mwifiex_ds_coalesce_cfg {
struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
};
+struct mwifiex_ds_tdls_oper {
+ u16 tdls_action;
+ u8 peer_mac[ETH_ALEN];
+ u16 capability;
+ u8 qos_info;
+ u8 *ext_capab;
+ u8 ext_capab_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *ht_capab;
+};
+
#endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 4e4686e6ac09..34472ea53841 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -515,8 +515,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
/* Append vendor specific IE TLV */
@@ -983,7 +982,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type = mwifiex_band_to_radio_type(
priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_NONE) {
@@ -1300,8 +1299,7 @@ int mwifiex_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1335,8 +1333,7 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
priv->curr_bss_params.band);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1376,8 +1373,7 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 4d79761b9c87..9d3d2758ec35 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d8ad554ce39f..407f8eada720 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -59,7 +59,7 @@ enum {
#define MWIFIEX_UPLD_SIZE (2312)
-#define MAX_EVENT_SIZE 1024
+#define MAX_EVENT_SIZE 2048
#define ARP_FILTER_MAX_BUF_SIZE 68
@@ -210,6 +210,7 @@ struct mwifiex_ra_list_tbl {
u16 ba_pkt_count;
u8 ba_packet_thr;
u16 total_pkt_count;
+ bool tdls_link;
};
struct mwifiex_tid_tbl {
@@ -262,6 +263,31 @@ struct ieee_types_generic {
u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
} __packed;
+struct ieee_types_bss_co_2040 {
+ struct ieee_types_header ieee_hdr;
+ u8 bss_2040co;
+} __packed;
+
+struct ieee_types_extcap {
+ struct ieee_types_header ieee_hdr;
+ u8 ext_capab[8];
+} __packed;
+
+struct ieee_types_vht_cap {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_cap vhtcap;
+} __packed;
+
+struct ieee_types_vht_oper {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_operation vhtoper;
+} __packed;
+
+struct ieee_types_aid {
+ struct ieee_types_header ieee_hdr;
+ u16 aid;
+} __packed;
+
struct mwifiex_bssdescriptor {
u8 mac_address[ETH_ALEN];
struct cfg80211_ssid ssid;
@@ -443,6 +469,7 @@ struct mwifiex_private {
u8 wpa_ie_len;
u8 wpa_is_gtk_set;
struct host_cmd_ds_802_11_key_material aes_key;
+ struct host_cmd_ds_802_11_key_material_v2 aes_key_v2;
u8 wapi_ie[256];
u8 wapi_ie_len;
u8 *wps_ie;
@@ -461,6 +488,7 @@ struct mwifiex_private {
struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
struct mwifiex_add_ba_param add_ba_param;
u16 rx_seq[MAX_NUM_TID];
+ u8 tos_to_tid_inv[MAX_NUM_TID];
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
@@ -518,6 +546,8 @@ struct mwifiex_private {
unsigned long csa_expire_time;
u8 del_list_idx;
bool hs2_enabled;
+ struct station_parameters *sta_params;
+ struct sk_buff_head tdls_txq;
};
enum mwifiex_ba_status {
@@ -583,17 +613,35 @@ struct mwifiex_bss_priv {
u64 fw_tsf;
};
-/* This is AP specific structure which stores information
- * about associated STA
+struct mwifiex_tdls_capab {
+ __le16 capab;
+ u8 rates[32];
+ u8 rates_len;
+ u8 qos_info;
+ u8 coex_2040;
+ u16 aid;
+ struct ieee80211_ht_cap ht_capb;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee_types_extcap extcap;
+ struct ieee_types_generic rsn_ie;
+ struct ieee80211_vht_cap vhtcap;
+ struct ieee80211_vht_operation vhtoper;
+};
+
+/* This is AP/TDLS specific structure which stores information
+ * about associated/peer STA
*/
struct mwifiex_sta_node {
struct list_head list;
u8 mac_addr[ETH_ALEN];
u8 is_wmm_enabled;
u8 is_11n_enabled;
+ u8 is_11ac_enabled;
u8 ampdu_sta[MAX_NUM_TID];
u16 rx_seq[MAX_NUM_TID];
u16 max_amsdu;
+ u8 tdls_status;
+ struct mwifiex_tdls_capab tdls_cap;
};
struct mwifiex_if_ops {
@@ -753,6 +801,8 @@ struct mwifiex_adapter {
atomic_t is_tx_received;
atomic_t pending_bridged_pkts;
struct semaphore *card_sem;
+ bool ext_scan;
+ u8 fw_key_api_major_ver, fw_key_api_minor_ver;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -938,6 +988,12 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params);
void mwifiex_set_ba_params(struct mwifiex_private *priv);
void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf);
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf);
/*
* This function checks if the queuing is RA based or not.
@@ -1078,7 +1134,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
const u8 *key, int key_len, u8 key_index,
const u8 *mac_addr, int disable);
-int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
+int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
int mwifiex_get_ver_ext(struct mwifiex_private *priv);
@@ -1159,6 +1215,32 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node);
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
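
Note: main.h now tracks TDLS peers in the same mwifiex_sta_node list used for AP clients and declares lookup/add/delete helpers for it. Their bodies are not part of this diff; the snippet below is only a plausible sketch of the lookup-by-MAC pattern those declarations imply (list head name and locking are assumptions):

	/* Hypothetical sketch; the real helper lives outside this diff and
	 * its list head/lock details are assumptions.
	 */
	static struct mwifiex_sta_node *
	example_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
	{
		struct mwifiex_sta_node *node;

		if (!mac)
			return NULL;

		list_for_each_entry(node, &priv->sta_list, list)
			if (ether_addr_equal(node->mac_addr, mac))
				return node;

		return NULL;
	}
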
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 03688aa14e8a..d11d4acf0890 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -39,20 +39,31 @@ static struct semaphore add_remove_card_sem;
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
- int size, int flags)
+ size_t size, int flags)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
+ struct mwifiex_dma_mapping mapping;
- buf_pa = pci_map_single(card->dev, skb->data, size, flags);
- if (pci_dma_mapping_error(card->dev, buf_pa)) {
+ mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
+ if (pci_dma_mapping_error(card->dev, mapping.addr)) {
dev_err(adapter->dev, "failed to map pci memory!\n");
return -1;
}
- memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t));
+ mapping.len = size;
+ memcpy(skb->cb, &mapping, sizeof(mapping));
return 0;
}
+static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb, int flags)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+ pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+}
+
/*
* This function reads sleep cookie and checks if FW is ready
*/
@@ -456,7 +467,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -513,7 +524,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -549,8 +560,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc2 = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -558,8 +569,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -587,8 +598,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc2 = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -596,8 +607,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -622,8 +633,8 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
desc = card->evtbd_ring[i];
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@@ -861,7 +872,6 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card;
- dma_addr_t buf_pa;
if (!adapter)
return 0;
@@ -869,16 +879,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
}
if (card && card->cmd_buf) {
- MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, card->cmd_buf->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
}
return 0;
}
@@ -956,7 +964,6 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
{
struct sk_buff *skb;
- dma_addr_t buf_pa;
u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
struct mwifiex_pcie_buf_desc *desc;
struct mwifiex_pfu_buf_desc *desc2;
@@ -986,13 +993,13 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
reg->tx_start_ptr;
skb = card->tx_buf_list[wrdoneidx];
+
if (skb) {
dev_dbg(adapter->dev,
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
unmap_count++;
@@ -1082,12 +1089,12 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
tmp = (__le16 *)&payload[2];
*tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
- if (mwifiex_map_pci_memory(adapter, skb, skb->len ,
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len,
PCI_DMA_TODEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
card->tx_buf_list[wrindx] = skb;
if (reg->pfu_enabled) {
@@ -1162,8 +1169,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
card->tx_buf_list[wrindx] = NULL;
if (reg->pfu_enabled)
memset(desc2, 0, sizeof(*desc2));
@@ -1211,9 +1217,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
- MWIFIEX_SKB_PACB(skb_data, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@@ -1240,7 +1244,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
dev_dbg(adapter->dev,
"RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
@@ -1316,7 +1320,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
/* Write the lower 32bits of the physical address to low command
* address scratch register
@@ -1325,8 +1329,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1338,8 +1341,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1348,8 +1350,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1358,8 +1359,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
dev_err(adapter->dev,
"%s: failed to assert door-bell intr\n", __func__);
- pci_unmap_single(card->dev, buf_pa,
- MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1433,7 +1433,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
*/
if (card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
+ cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf);
/* Write the lower 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
@@ -1454,7 +1454,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
}
- MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
+ cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf);
/* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
(u32)cmd_buf_pa)) {
@@ -1508,13 +1508,17 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
int count = 0;
u16 rx_len;
__le16 pkt_len;
- dma_addr_t buf_pa;
dev_dbg(adapter->dev, "info: Rx CMD Response\n");
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+
+ /* Unmap the command as a response has been received. */
+ if (card->cmd_buf) {
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
+ card->cmd_buf = NULL;
+ }
pkt_len = *((__le16 *)skb->data);
rx_len = le16_to_cpu(pkt_len);
@@ -1538,8 +1542,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
-
- MWIFIEX_SKB_PACB(skb, &buf_pa);
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
@@ -1574,8 +1576,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
- struct sk_buff *skb_tmp;
if (skb) {
card->cmdrsp_buf = skb;
@@ -1585,14 +1585,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
return -1;
}
- skb_tmp = card->cmd_buf;
- if (skb_tmp) {
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb_tmp->len,
- PCI_DMA_FROMDEVICE);
- card->cmd_buf = NULL;
- }
-
return 0;
}
@@ -1605,7 +1597,6 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr, event;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1641,9 +1632,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
- MWIFIEX_SKB_PACB(skb_cmd, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
@@ -1689,7 +1678,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
int ret = 0;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!skb)
@@ -1714,11 +1702,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
card->evt_buf_list[rdptr] = skb;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
desc = card->evtbd_ring[rdptr];
- desc->paddr = buf_pa;
+ desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
desc->len = (u16)skb->len;
desc->flags = 0;
skb = NULL;
@@ -1768,7 +1754,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct sk_buff *skb;
u32 txlen, tx_blocks = 0, tries, len;
u32 block_retry_cnt = 0;
- dma_addr_t buf_pa;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -1866,8 +1851,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
goto done;
}
- MWIFIEX_SKB_PACB(skb, &buf_pa);
-
/* Wait for the command done interrupt */
do {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
@@ -1875,16 +1858,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_err(adapter->dev, "%s: Failed to read "
"interrupt status during fw dnld.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
ret = -1;
goto done;
}
} while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
CPU_INTR_DOOR_BELL);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
offset += txlen;
} while (true);
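
Note: the recurring pattern in the pcie.c hunks is that mwifiex_map_pci_memory() now caches both the DMA address and the mapped length in skb->cb, so mwifiex_unmap_pci_memory() and MWIFIEX_SKB_DMA_ADDR() can recover the mapping from the skb alone instead of reading the ring descriptor or hard-coding a size. The struct and accessors live outside this diff (in the driver's utility header); a minimal sketch of the assumed contract:

	/* Sketch of the assumed skb->cb contract; names are illustrative,
	 * the driver's own definitions are not shown in this diff.
	 */
	struct example_dma_mapping {
		dma_addr_t addr;
		size_t len;
	};

	static inline void example_store_mapping(struct sk_buff *skb,
						 dma_addr_t addr, size_t len)
	{
		struct example_dma_mapping map = { .addr = addr, .len = len };

		BUILD_BUG_ON(sizeof(map) > sizeof(skb->cb));
		memcpy(skb->cb, &map, sizeof(map));
	}

	static inline dma_addr_t example_skb_dma_addr(struct sk_buff *skb)
	{
		struct example_dma_mapping map;

		memcpy(&map, skb->cb, sizeof(map));
		return map.addr;
	}

Keeping the length alongside the address is what lets the cleanup paths above drop the MWIFIEX_UPLD_SIZE / MAX_EVENT_SIZE constants from every pci_unmap_single() call.
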
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0a8a26e10f01..92adbb1ebabc 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -595,7 +595,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
- u32 tlv_idx, rates_size;
+ u32 tlv_idx, rates_size, cmd_no;
u32 total_scan_time;
u32 done_early;
u8 radio_type;
@@ -733,9 +733,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Send the scan command to the firmware with the specified
cfg */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
- HostCmd_ACT_GEN_SET, 0,
- scan_cfg_out);
+ if (priv->adapter->ext_scan)
+ cmd_no = HostCmd_CMD_802_11_SCAN_EXT;
+ else
+ cmd_no = HostCmd_CMD_802_11_SCAN;
+
+ ret = mwifiex_send_cmd_async(priv, cmd_no, HostCmd_ACT_GEN_SET,
+ 0, scan_cfg_out);
/* rate IE is updated per scan command but same starting
* pointer is used each time so that rate IE from earlier
@@ -786,6 +790,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+ struct mwifiex_ie_types_bssid_list *bssid_tlv;
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
@@ -848,6 +853,17 @@ mwifiex_config_scan(struct mwifiex_private *priv,
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
+ if (adapter->ext_scan &&
+ !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
+ bssid_tlv =
+ (struct mwifiex_ie_types_bssid_list *)tlv_pos;
+ bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
+ bssid_tlv->header.len = cpu_to_le16(ETH_ALEN);
+ memcpy(bssid_tlv->bssid, user_scan_in->specific_bssid,
+ ETH_ALEN);
+ tlv_pos += sizeof(struct mwifiex_ie_types_bssid_list);
+ }
+
for (i = 0; i < user_scan_in->num_ssids; i++) {
ssid_len = user_scan_in->ssid_list[i].ssid_len;
@@ -941,7 +957,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type =
mwifiex_band_to_radio_type(priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
}
@@ -1576,6 +1592,228 @@ done:
return 0;
}
+static int
+mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+ u32 *bytes_left, u64 fw_tsf, u8 *radio_type,
+ bool ext_scan, s32 rssi_val)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_chan_freq_power *cfp;
+ struct cfg80211_bss *bss;
+ u8 bssid[ETH_ALEN];
+ s32 rssi;
+ const u8 *ie_buf;
+ size_t ie_len;
+ u16 channel = 0;
+ u16 beacon_size = 0;
+ u32 curr_bcn_bytes;
+ u32 freq;
+ u16 beacon_period;
+ u16 cap_info_bitmap;
+ u8 *current_ptr;
+ u64 timestamp;
+ struct mwifiex_fixed_bcn_param *bcn_param;
+ struct mwifiex_bss_priv *bss_priv;
+
+ if (*bytes_left >= sizeof(beacon_size)) {
+ /* Extract & convert beacon size from command buffer */
+ memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
+ *bytes_left -= sizeof(beacon_size);
+ *bss_info += sizeof(beacon_size);
+ }
+
+ if (!beacon_size || beacon_size > *bytes_left) {
+ *bss_info += *bytes_left;
+ *bytes_left = 0;
+ return -EFAULT;
+ }
+
+ /* Initialize the current working beacon pointer for this BSS
+ * iteration
+ */
+ current_ptr = *bss_info;
+
+ /* Advance the return beacon pointer past the current beacon */
+ *bss_info += beacon_size;
+ *bytes_left -= beacon_size;
+
+ curr_bcn_bytes = beacon_size;
+
+ /* First 5 fields are bssid, RSSI (for legacy scan only),
+ * time stamp, beacon interval, and capability information
+ */
+ if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
+ sizeof(struct mwifiex_fixed_bcn_param)) {
+ dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ return -EFAULT;
+ }
+
+ memcpy(bssid, current_ptr, ETH_ALEN);
+ current_ptr += ETH_ALEN;
+ curr_bcn_bytes -= ETH_ALEN;
+
+ if (!ext_scan) {
+ rssi = (s32) *(u8 *)current_ptr;
+ rssi = (-rssi) * 100; /* Convert dBm to mBm */
+ current_ptr += sizeof(u8);
+ curr_bcn_bytes -= sizeof(u8);
+ dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ } else {
+ rssi = rssi_val;
+ }
+
+ bcn_param = (struct mwifiex_fixed_bcn_param *)current_ptr;
+ current_ptr += sizeof(*bcn_param);
+ curr_bcn_bytes -= sizeof(*bcn_param);
+
+ timestamp = le64_to_cpu(bcn_param->timestamp);
+ beacon_period = le16_to_cpu(bcn_param->beacon_period);
+
+ cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
+ dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
+ cap_info_bitmap);
+
+ /* Rest of the current buffer are IE's */
+ ie_buf = current_ptr;
+ ie_len = curr_bcn_bytes;
+ dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
+ curr_bcn_bytes);
+
+ while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
+ u8 element_id, element_len;
+
+ element_id = *current_ptr;
+ element_len = *(current_ptr + 1);
+ if (curr_bcn_bytes < element_len +
+ sizeof(struct ieee_types_header)) {
+ dev_err(adapter->dev,
+ "%s: bytes left < IE length\n", __func__);
+ return -EFAULT;
+ }
+ if (element_id == WLAN_EID_DS_PARAMS) {
+ channel = *(current_ptr +
+ sizeof(struct ieee_types_header));
+ break;
+ }
+
+ current_ptr += element_len + sizeof(struct ieee_types_header);
+ curr_bcn_bytes -= element_len +
+ sizeof(struct ieee_types_header);
+ }
+
+ if (channel) {
+ struct ieee80211_channel *chan;
+ u8 band;
+
+ /* Skip entry if on csa closed channel */
+ if (channel == priv->csa_chan) {
+ dev_dbg(adapter->dev,
+ "Dropping entry on csa closed channel\n");
+ return 0;
+ }
+
+ band = BAND_G;
+ if (radio_type)
+ band = mwifiex_radio_type_to_band(*radio_type &
+ (BIT(0) | BIT(1)));
+
+ cfp = mwifiex_get_cfp(priv, band, channel, 0);
+
+ freq = cfp ? cfp->freq : 0;
+
+ chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
+
+ if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ chan, bssid, timestamp,
+ cap_info_bitmap, beacon_period,
+ ie_buf, ie_len, rssi, GFP_KERNEL);
+ bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ bss_priv->band = band;
+ bss_priv->fw_tsf = fw_tsf;
+ if (priv->media_connected &&
+ !memcmp(bssid, priv->curr_bss_params.bss_descriptor
+ .mac_address, ETH_ALEN))
+ mwifiex_update_curr_bss_params(priv, bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
+ }
+ } else {
+ dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ }
+
+ return 0;
+}
+
+static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct cmd_ctrl_node *cmd_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ if (list_empty(&adapter->scan_pending_q)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+ /* Need to indicate IOCTL complete */
+ if (adapter->curr_cmd->wait_q_enabled) {
+ adapter->cmd_wait_q.status = 0;
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev,
+ "complete internal scan\n");
+ mwifiex_complete_cmd(adapter,
+ adapter->curr_cmd);
+ }
+ }
+ if (priv->report_scan_result)
+ priv->report_scan_result = false;
+
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: notifying scan done\n");
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ dev_dbg(adapter->dev, "info: scan already aborted\n");
+ }
+ } else {
+ if ((priv->scan_aborting && !priv->scan_request) ||
+ priv->scan_block) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
+ mod_timer(&priv->scan_delay_timer, jiffies);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: triggering scan abort\n", __func__);
+ } else if (!mwifiex_wmm_lists_empty(adapter) &&
+ (priv->scan_request && (priv->scan_request->flags &
+ NL80211_SCAN_FLAG_LOW_PRIORITY))) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = 1;
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ dev_dbg(priv->adapter->dev,
+ "info: %s: deferring scan\n", __func__);
+ } else {
+ /* Get scan command from scan_pending_q and put to
+ * cmd_pending_q
+ */
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ }
+ }
+
+ return;
+}
+
/*
* This function handles the command response of scan.
*
@@ -1600,7 +1838,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- struct cmd_ctrl_node *cmd_node;
struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
struct mwifiex_ie_types_data *tlv_data;
struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
@@ -1609,12 +1846,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
u32 bytes_left;
u32 idx;
u32 tlv_buf_size;
- struct mwifiex_chan_freq_power *cfp;
struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
struct chan_band_param_set *chan_band;
u8 is_bgscan_resp;
- unsigned long flags;
- struct cfg80211_bss *bss;
+ __le64 fw_tsf = 0;
+ u8 *radio_type;
is_bgscan_resp = (le16_to_cpu(resp->command)
== HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -1676,220 +1912,194 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
&chan_band_tlv);
for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
- u8 bssid[ETH_ALEN];
- s32 rssi;
- const u8 *ie_buf;
- size_t ie_len;
- u16 channel = 0;
- __le64 fw_tsf = 0;
- u16 beacon_size = 0;
- u32 curr_bcn_bytes;
- u32 freq;
- u16 beacon_period;
- u16 cap_info_bitmap;
- u8 *current_ptr;
- u64 timestamp;
- struct mwifiex_bcn_param *bcn_param;
- struct mwifiex_bss_priv *bss_priv;
-
- if (bytes_left >= sizeof(beacon_size)) {
- /* Extract & convert beacon size from command buffer */
- memcpy(&beacon_size, bss_info, sizeof(beacon_size));
- bytes_left -= sizeof(beacon_size);
- bss_info += sizeof(beacon_size);
- }
+ /*
+ * If the TSF TLV was appended to the scan results, save this
+ * entry's TSF value in the fw_tsf field. It is the firmware's
+ * TSF value at the time the beacon or probe response was
+ * received.
+ */
+ if (tsf_tlv)
+ memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+ sizeof(fw_tsf));
- if (!beacon_size || beacon_size > bytes_left) {
- bss_info += bytes_left;
- bytes_left = 0;
- ret = -1;
- goto check_next_scan;
+ if (chan_band_tlv) {
+ chan_band = &chan_band_tlv->chan_band_param[idx];
+ radio_type = &chan_band->radio_type;
+ } else {
+ radio_type = NULL;
}
- /* Initialize the current working beacon pointer for this BSS
- * iteration */
- current_ptr = bss_info;
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left,
+ le64_to_cpu(fw_tsf),
+ radio_type, false, 0);
+ if (ret)
+ goto check_next_scan;
+ }
- /* Advance the return beacon pointer past the current beacon */
- bss_info += beacon_size;
- bytes_left -= beacon_size;
+check_next_scan:
+ mwifiex_check_next_scan_command(priv);
+ return ret;
+}
- curr_bcn_bytes = beacon_size;
+/*
+ * This function prepares an extended scan command to be sent to the firmware
+ *
+ * This uses the scan command configuration sent to the command processing
+ * module in the command preparation stage to configure an extended scan
+ * command structure to send to the firmware.
+ */
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_802_11_scan_ext *ext_scan = &cmd->params.ext_scan;
+ struct mwifiex_scan_cmd_config *scan_cfg = data_buf;
- /*
- * First 5 fields are bssid, RSSI, time stamp, beacon interval,
- * and capability information
- */
- if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
- dev_err(adapter->dev,
- "InterpretIE: not enough bytes left\n");
- continue;
- }
- bcn_param = (struct mwifiex_bcn_param *)current_ptr;
- current_ptr += sizeof(*bcn_param);
- curr_bcn_bytes -= sizeof(*bcn_param);
+ memcpy(ext_scan->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
- memcpy(bssid, bcn_param->bssid, ETH_ALEN);
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN_EXT);
- rssi = (s32) bcn_param->rssi;
- rssi = (-rssi) * 100; /* Convert dBm to mBm */
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
+ cmd->size = cpu_to_le16((u16)(sizeof(ext_scan->reserved)
+ + scan_cfg->tlv_buf_len + S_DS_GEN));
- timestamp = le64_to_cpu(bcn_param->timestamp);
- beacon_period = le16_to_cpu(bcn_param->beacon_period);
+ return 0;
+}
- cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
- dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- cap_info_bitmap);
+/* This function handles the command response of extended scan */
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
+{
+ dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ return 0;
+}
- /* Rest of the current buffer are IE's */
- ie_buf = current_ptr;
- ie_len = curr_bcn_bytes;
- dev_dbg(adapter->dev,
- "info: InterpretIE: IELength for this AP = %d\n",
- curr_bcn_bytes);
+/* This function handles the extended scan report event. It parses
+ * extended scan results and informs the cfg80211 stack.
+ */
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf)
+{
+ int ret = 0;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *bss_info;
+ u32 bytes_left, bytes_left_for_tlv, idx;
+ u16 type, len;
+ struct mwifiex_ie_types_data *tlv;
+ struct mwifiex_ie_types_bss_scan_rsp *scan_rsp_tlv;
+ struct mwifiex_ie_types_bss_scan_info *scan_info_tlv;
+ u8 *radio_type;
+ u64 fw_tsf = 0;
+ s32 rssi = 0;
+ struct mwifiex_event_scan_result *event_scan = buf;
+ u8 num_of_set = event_scan->num_of_set;
+ u8 *scan_resp = buf + sizeof(struct mwifiex_event_scan_result);
+ u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
+
+ if (num_of_set > MWIFIEX_MAX_AP) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
+ num_of_set);
+ ret = -1;
+ goto check_next_scan;
+ }
- while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
- u8 element_id, element_len;
+ bytes_left = scan_resp_size;
+ dev_dbg(adapter->dev,
+ "EXT_SCAN: size %d, returned %d APs...",
+ scan_resp_size, num_of_set);
- element_id = *current_ptr;
- element_len = *(current_ptr + 1);
- if (curr_bcn_bytes < element_len +
- sizeof(struct ieee_types_header)) {
- dev_err(priv->adapter->dev,
- "%s: bytes left < IE length\n",
- __func__);
- goto check_next_scan;
- }
- if (element_id == WLAN_EID_DS_PARAMS) {
- channel = *(current_ptr + sizeof(struct ieee_types_header));
- break;
- }
+ tlv = (struct mwifiex_ie_types_data *)scan_resp;
- current_ptr += element_len +
- sizeof(struct ieee_types_header);
- curr_bcn_bytes -= element_len +
- sizeof(struct ieee_types_header);
+ for (idx = 0; idx < num_of_set && bytes_left; idx++) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+ break;
}
+ scan_rsp_tlv = NULL;
+ scan_info_tlv = NULL;
+ bytes_left_for_tlv = bytes_left;
- /*
- * If the TSF TLV was appended to the scan results, save this
- * entry's TSF value in the fw_tsf field. It is the firmware's
- * TSF value at the time the beacon or probe response was
- * received.
+ /* BSS response TLV with beacon or probe response buffer
+ * at the initial position of each descriptor
*/
- if (tsf_tlv)
- memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
- sizeof(fw_tsf));
-
- if (channel) {
- struct ieee80211_channel *chan;
- u8 band;
+ if (type != TLV_TYPE_BSS_SCAN_RSP)
+ break;
- /* Skip entry if on csa closed channel */
- if (channel == priv->csa_chan) {
- dev_dbg(adapter->dev,
- "Dropping entry on csa closed channel\n");
+ bss_info = (u8 *)tlv;
+ scan_rsp_tlv = (struct mwifiex_ie_types_bss_scan_rsp *)tlv;
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+
+ while (bytes_left_for_tlv >=
+ sizeof(struct mwifiex_ie_types_header) &&
+ le16_to_cpu(tlv->header.type) != TLV_TYPE_BSS_SCAN_RSP) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left_for_tlv <
+ sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+ scan_rsp_tlv = NULL;
+ bytes_left_for_tlv = 0;
continue;
}
-
- band = BAND_G;
- if (chan_band_tlv) {
- chan_band =
- &chan_band_tlv->chan_band_param[idx];
- band = mwifiex_radio_type_to_band(
- chan_band->radio_type
- & (BIT(0) | BIT(1)));
- }
-
- cfp = mwifiex_get_cfp(priv, band, channel, 0);
-
- freq = cfp ? cfp->freq : 0;
-
- chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
-
- if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
- bss = cfg80211_inform_bss(priv->wdev->wiphy,
- chan, bssid, timestamp,
- cap_info_bitmap, beacon_period,
- ie_buf, ie_len, rssi, GFP_KERNEL);
- bss_priv = (struct mwifiex_bss_priv *)bss->priv;
- bss_priv->band = band;
- bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
- if (priv->media_connected &&
- !memcmp(bssid,
- priv->curr_bss_params.bss_descriptor
- .mac_address, ETH_ALEN))
- mwifiex_update_curr_bss_params(priv,
- bss);
- cfg80211_put_bss(priv->wdev->wiphy, bss);
+ switch (type) {
+ case TLV_TYPE_BSS_SCAN_INFO:
+ scan_info_tlv =
+ (struct mwifiex_ie_types_bss_scan_info *)tlv;
+ if (len !=
+ sizeof(struct mwifiex_ie_types_bss_scan_info) -
+ sizeof(struct mwifiex_ie_types_header)) {
+ bytes_left_for_tlv = 0;
+ continue;
+ }
+ break;
+ default:
+ break;
}
- } else {
- dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
}
- }
-check_next_scan:
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- if (list_empty(&adapter->scan_pending_q)) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ if (!scan_rsp_tlv)
+ break;
- /* Need to indicate IOCTL complete */
- if (adapter->curr_cmd->wait_q_enabled) {
- adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
- }
- if (priv->report_scan_result)
- priv->report_scan_result = false;
+ /* Advance pointer to the beacon buffer length and
+ * update the bytes count so that the function
+ * wlan_interpret_bss_desc_with_ie() can handle the
+ * scan buffer without any change
+ */
+ bss_info += sizeof(u16);
+ bytes_left -= sizeof(u16);
- if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: notifying scan done\n");
- cfg80211_scan_done(priv->scan_request, 0);
- priv->scan_request = NULL;
- } else {
- priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
- }
- } else {
- if ((priv->scan_aborting && !priv->scan_request) ||
- priv->scan_block) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
- mod_timer(&priv->scan_delay_timer, jiffies);
- dev_dbg(priv->adapter->dev,
- "info: %s: triggerring scan abort\n", __func__);
- } else if (!mwifiex_wmm_lists_empty(adapter) &&
- (priv->scan_request && (priv->scan_request->flags &
- NL80211_SCAN_FLAG_LOW_PRIORITY))) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = 1;
- mod_timer(&priv->scan_delay_timer, jiffies +
- msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
- dev_dbg(priv->adapter->dev,
- "info: %s: deferring scan\n", __func__);
+ if (scan_info_tlv) {
+ rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
+ rssi *= 100; /* Convert dBm to mBm */
+ dev_dbg(adapter->dev,
+ "info: InterpretIE: RSSI=%d\n", rssi);
+ fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
+ radio_type = &scan_info_tlv->radio_type;
} else {
- /* Get scan command from scan_pending_q and put to
- cmd_pending_q */
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
- true);
+ radio_type = NULL;
}
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left, fw_tsf,
+ radio_type, true, rssi);
+ if (ret)
+ goto check_next_scan;
}
+check_next_scan:
+ if (!event_scan->more_event)
+ mwifiex_check_next_scan_command(priv);
+
return ret;
}
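
Note: the extended-scan paths in scan.c all walk firmware buffers as type/length/value records, checking the remaining byte count before trusting each length field. A stripped-down, stand-alone sketch of that bounded walk (type values and the callback are placeholders; the snippet assumes a little-endian host so the on-wire u16 fields can be copied directly):

	/* Stand-alone sketch of the bounded TLV walk used by the
	 * extended-scan handlers above.
	 */
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct tlv_hdr {
		uint16_t type;
		uint16_t len;
	};

	static int walk_tlvs(const uint8_t *buf, size_t bytes_left,
			     void (*cb)(uint16_t type, const uint8_t *val,
					uint16_t len))
	{
		struct tlv_hdr hdr;

		while (bytes_left >= sizeof(hdr)) {
			memcpy(&hdr, buf, sizeof(hdr));
			if (bytes_left < sizeof(hdr) + hdr.len)
				return -1;	/* truncated TLV */
			cb(hdr.type, buf + sizeof(hdr), hdr.len);
			buf += sizeof(hdr) + hdr.len;
			bytes_left -= sizeof(hdr) + hdr.len;
		}
		return 0;
	}
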
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 9208a8816b80..5aa3d39e48bc 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -532,8 +532,228 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
return 0;
}
+/* This function populates key material v2 command
+ * to set network key for AES & CMAC AES.
+ */
+static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_ds_encrypt_key *enc_key,
+ struct host_cmd_ds_802_11_key_material_v2 *km)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u16 size, len = KEY_PARAMS_FIXED_LEN;
+
+ if (enc_key->is_igtk_key) {
+ dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.cmac_aes.ipn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_info &= cpu_to_le16(~KEY_MCAST);
+ km->key_param_set.key_info |= cpu_to_le16(KEY_IGTK);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC;
+ km->key_param_set.key_params.cmac_aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.cmac_aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_cmac_aes_param);
+ } else {
+ dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.aes.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES;
+ km->key_param_set.key_params.aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_aes_param);
+ }
+
+ km->key_param_set.len = cpu_to_le16(len);
+ size = len + sizeof(struct mwifiex_ie_types_header) +
+ sizeof(km->action) + S_DS_GEN;
+ cmd->size = cpu_to_le16(size);
+
+ return 0;
+}
+
+/* This function prepares command to set/get/reset network key(s).
+ * This function prepares key material command for V2 format.
+ * Preparation includes -
+ * - Setting command ID, action and proper size
+ * - Setting WEP keys, WAPI keys or WPA keys along with required
+ * encryption (TKIP, AES) (as required)
+ * - Ensuring correct endian-ness
+ */
+static int
+mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *mac = enc_key->mac_addr;
+ u16 key_info, len = KEY_PARAMS_FIXED_LEN;
+ struct host_cmd_ds_802_11_key_material_v2 *km =
+ &cmd->params.key_material_v2;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
+ km->action = cpu_to_le16(cmd_action);
+
+ if (cmd_action == HostCmd_ACT_GEN_GET) {
+ dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+ km->key_param_set.key_idx =
+ enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+ key_info = KEY_UNICAST;
+ else
+ key_info = KEY_MCAST;
+
+ if (enc_key->is_igtk_key)
+ key_info |= KEY_IGTK;
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ memset(&km->key_param_set, 0,
+ sizeof(struct mwifiex_ie_type_key_param_set_v2));
+
+ if (enc_key->key_disable) {
+ dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ key_info = KEY_MCAST | KEY_UNICAST;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ key_info = KEY_ENABLED;
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
+ dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+ len += sizeof(struct mwifiex_wep_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ km->key_param_set.key_type = KEY_TYPE_ID_WEP;
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ } else {
+ if (enc_key->is_current_wep_key) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ if (km->key_param_set.key_idx ==
+ (priv->wep_key_curr_index & KEY_INDEX_MASK))
+ key_info |= KEY_DEFAULT;
+ } else {
+ if (mac) {
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST;
+ else
+ key_info |= KEY_UNICAST |
+ KEY_DEFAULT;
+ } else {
+ key_info |= KEY_MCAST;
+ }
+ }
+ }
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ km->key_param_set.key_params.wep.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wep.key,
+ enc_key->key_material, enc_key->key_len);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST | KEY_RX_KEY;
+ else
+ key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
+
+ if (enc_key->is_wapi_key) {
+ dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+ km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
+ memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
+ PN_LEN);
+ km->key_param_set.key_params.wapi.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wapi.key,
+ enc_key->key_material, enc_key->key_len);
+ if (is_broadcast_ether_addr(mac))
+ priv->sec_info.wapi_key_on = true;
+
+ if (!priv->sec_info.wapi_key_on)
+ key_info |= KEY_DEFAULT;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ len += sizeof(struct mwifiex_wapi_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ key_info |= KEY_DEFAULT;
+ /* Enable unicast bit for WPA-NONE/ADHOC_AES */
+ if (!priv->sec_info.wpa2_enabled &&
+ !is_broadcast_ether_addr(mac))
+ key_info |= KEY_UNICAST;
+ } else {
+ /* Enable default key for WPA/WPA2 */
+ if (!priv->wpa_is_gtk_set)
+ key_info |= KEY_DEFAULT;
+ }
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_CCMP)
+ return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
+ dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.tkip.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_TKIP;
+ km->key_param_set.key_params.tkip.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.tkip.key,
+ enc_key->key_material, enc_key->key_len);
+
+ len += sizeof(struct mwifiex_tkip_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ }
+
+ return 0;
+}
+
/*
* This function prepares command to set/get/reset network key(s).
+ * This function prepares key material command for V1 format.
*
* Preparation includes -
* - Setting command ID, action and proper size
@@ -542,10 +762,10 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
* - Ensuring correct endian-ness
*/
static int
-mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action, u32 cmd_oid,
- struct mwifiex_ds_encrypt_key *enc_key)
+mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
{
struct host_cmd_ds_802_11_key_material *key_material =
&cmd->params.key_material;
@@ -724,6 +944,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
return ret;
}
+/* Wrapper function for setting network key depending upon FW KEY API version */
+static int
+mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_cmd_802_11_key_material_v2(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+
+ else
+ return mwifiex_cmd_802_11_key_material_v1(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+}
+
/*
* This function prepares command to set/get 11d domain information.
*
@@ -1280,6 +1518,127 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
+ struct mwifiex_ds_tdls_oper *oper = data_buf;
+ struct mwifiex_sta_node *sta_ptr;
+ struct host_cmd_tlv_rates *tlv_rates;
+ struct mwifiex_ie_types_htcap *ht_capab;
+ struct mwifiex_ie_types_qos_info *wmm_qos_info;
+ struct mwifiex_ie_types_extcap *extcap;
+ struct mwifiex_ie_types_vhtcap *vht_capab;
+ struct mwifiex_ie_types_aid *aid;
+ u8 *pos, qos_info;
+ u16 config_len = 0;
+ struct station_parameters *params = priv->sta_params;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
+
+ tdls_oper->reason = 0;
+ memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
+ sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
+
+ pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
+
+ switch (oper->tdls_action) {
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_DELETE);
+ break;
+ case MWIFIEX_TDLS_CREATE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CREATE);
+ break;
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
+
+ if (!params) {
+ dev_err(priv->adapter->dev,
+ "TDLS config params not available for %pM\n",
+ oper->peer_mac);
+ return -ENODATA;
+ }
+
+ *(__le16 *)pos = cpu_to_le16(params->capability);
+ config_len += sizeof(params->capability);
+
+ qos_info = params->uapsd_queues | (params->max_sp << 5);
+ wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos +
+ config_len);
+ wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
+ wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info));
+ wmm_qos_info->qos_info = qos_info;
+ config_len += sizeof(struct mwifiex_ie_types_qos_info);
+
+ if (params->ht_capa) {
+ ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
+ config_len);
+ ht_capab->header.type =
+ cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ ht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ memcpy(&ht_capab->ht_cap, params->ht_capa,
+ sizeof(struct ieee80211_ht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_htcap);
+ }
+
+ if (params->supported_rates && params->supported_rates_len) {
+ tlv_rates = (struct host_cmd_tlv_rates *)(pos +
+ config_len);
+ tlv_rates->header.type =
+ cpu_to_le16(WLAN_EID_SUPP_RATES);
+ tlv_rates->header.len =
+ cpu_to_le16(params->supported_rates_len);
+ memcpy(tlv_rates->rates, params->supported_rates,
+ params->supported_rates_len);
+ config_len += sizeof(struct host_cmd_tlv_rates) +
+ params->supported_rates_len;
+ }
+
+ if (params->ext_capab && params->ext_capab_len) {
+ extcap = (struct mwifiex_ie_types_extcap *)(pos +
+ config_len);
+ extcap->header.type =
+ cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
+ extcap->header.len = cpu_to_le16(params->ext_capab_len);
+ memcpy(extcap->ext_capab, params->ext_capab,
+ params->ext_capab_len);
+ config_len += sizeof(struct mwifiex_ie_types_extcap) +
+ params->ext_capab_len;
+ }
+ if (params->vht_capa) {
+ vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
+ config_len);
+ vht_capab->header.type =
+ cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ vht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+ memcpy(&vht_capab->vht_cap, params->vht_capa,
+ sizeof(struct ieee80211_vht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_vhtcap);
+ }
+ if (params->aid) {
+ aid = (struct mwifiex_ie_types_aid *)(pos + config_len);
+ aid->header.type = cpu_to_le16(WLAN_EID_AID);
+ aid->header.len = cpu_to_le16(sizeof(params->aid));
+ aid->aid = cpu_to_le16(params->aid);
+ config_len += sizeof(struct mwifiex_ie_types_aid);
+ }
+
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+ return -ENOTSUPP;
+ }
+
+ le16_add_cpu(&cmd->size, config_len);
+
+ return 0;
+}
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1472,6 +1831,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1507,6 +1869,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 24523e4015cb..1c5e18804074 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -69,6 +69,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_802_11_SCAN:
+ case HostCmd_CMD_802_11_SCAN_EXT:
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
list_for_each_entry_safe(cmd_node, tmp_node,
@@ -561,13 +562,13 @@ static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
}
/*
- * This function handles the command response of set/get key material.
+ * This function handles the command response of set/get v1 key material.
*
* Handling includes updating the driver parameters to reflect the
* changes.
*/
-static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp)
+static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_key_material *key =
&resp->params.key_material;
@@ -590,6 +591,51 @@ static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
}
/*
+ * This function handles the command response of set/get v2 key material.
+ *
+ * Handling includes updating the driver parameters to reflect the
+ * changes.
+ */
+static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_802_11_key_material_v2 *key_v2;
+ __le16 len;
+
+ key_v2 = &resp->params.key_material_v2;
+ if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+ if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
+ dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ priv->wpa_is_gtk_set = true;
+ priv->scan_block = false;
+ }
+ }
+
+ if (key_v2->key_param_set.key_type != KEY_TYPE_ID_AES)
+ return 0;
+
+ memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+ WLAN_KEY_LEN_CCMP);
+ priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+ key_v2->key_param_set.key_params.aes.key_len;
+ len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
+ memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
+ key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
+
+ return 0;
+}
+
+/* Wrapper function for processing response of key material command */
+static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_ret_802_11_key_material_v2(priv, resp);
+ else
+ return mwifiex_ret_802_11_key_material_v1(priv, resp);
+}
+
+/*
* This function handles the command response of get 11d domain information.
*/
static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
@@ -800,7 +846,60 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_tdls_oper *cmd_tdls_oper = &resp->params.tdls_oper;
+ u16 reason = le16_to_cpu(cmd_tdls_oper->reason);
+ u16 action = le16_to_cpu(cmd_tdls_oper->tdls_action);
+ struct mwifiex_sta_node *node =
+ mwifiex_get_sta_entry(priv, cmd_tdls_oper->peer_mac);
+ switch (action) {
+ case ACT_TDLS_DELETE:
+ if (reason)
+ dev_err(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ else
+ dev_dbg(priv->adapter->dev,
+ "TDLS link delete for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ break;
+ case ACT_TDLS_CREATE:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link creation for %pM failed: reason %d",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node && reason != TDLS_ERR_LINK_EXISTS)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link creation for %pM successful",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ case ACT_TDLS_CONFIG:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link config for %pM failed, reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+ "Unknown TDLS command action response %d", action);
+ return -1;
+ }
+
+ return 0;
+}
/*
* This function handles the command response for subscribe event command.
*/
@@ -871,6 +970,10 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_802_11_scan(priv, resp);
adapter->curr_cmd->wait_q_enabled = false;
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_ret_802_11_scan_ext(priv);
+ adapter->curr_cmd->wait_q_enabled = false;
+ break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
dev_dbg(adapter->dev,
@@ -999,6 +1102,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_COALESCE_CFG:
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_ret_tdls_oper(priv, resp);
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8c351f71f72f..92ff7b324b00 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->scan_block = false;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ mwifiex_disable_all_tdls_links(priv);
+
/* Free Tx and Rx packets, report disconnect to upper layer */
mwifiex_clean_txrx(priv);
@@ -331,6 +335,14 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
dev_dbg(adapter->dev, "event: PORT RELEASE\n");
break;
+ case EVENT_EXT_SCAN_REPORT:
+ dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ if (adapter->ext_scan)
+ ret = mwifiex_handle_event_ext_scan_report(priv,
+ adapter->event_skb->data);
+
+ break;
+
case EVENT_WMM_STATUS_CHANGE:
dev_dbg(adapter->dev, "event: WMM status changed\n");
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_WMM_GET_STATUS,
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c5cb2ed19ec2..b393d55b3aa0 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -290,7 +290,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
HostCmd_SCAN_RADIO_TYPE_BG)
- config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
+ config_bands = BAND_B | BAND_G | BAND_GN;
else
config_bands = BAND_A | BAND_AN | BAND_AAC;
@@ -865,6 +865,7 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct mwifiex_wep_key *wep_key;
int index;
@@ -879,10 +880,17 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
if (!wep_key->key_length) {
- dev_err(priv->adapter->dev,
+ dev_err(adapter->dev,
"key not set, so cannot enable it\n");
return -1;
}
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) {
+ memcpy(encrypt_key->key_material,
+ wep_key->key_material, wep_key->key_length);
+ encrypt_key->key_len = wep_key->key_length;
+ }
+
priv->wep_key_curr_index = (u16) index;
priv->sec_info.wep_enabled = 1;
} else {
@@ -897,13 +905,25 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
priv->sec_info.wep_enabled = 1;
}
if (wep_key->key_length) {
+ void *enc_key;
+
+ if (encrypt_key->key_disable)
+ memset(&priv->wep_key[index], 0,
+ sizeof(struct mwifiex_wep_key));
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ enc_key = encrypt_key;
+ else
+ enc_key = NULL;
+
/* Send request to firmware */
ret = mwifiex_send_cmd_async(priv,
HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ HostCmd_ACT_GEN_SET, 0, enc_key);
if (ret)
return ret;
}
+
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
@@ -1044,19 +1064,27 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = key_len;
+ encrypt_key.key_index = key_index;
if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
encrypt_key.is_igtk_key = true;
if (!disable) {
- encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
+ else
+ encrypt_key.is_current_wep_key = true;
+
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
- if (kp && kp->seq && kp->seq_len)
+ if (kp && kp->seq && kp->seq_len) {
memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
+ encrypt_key.pn_len = kp->seq_len;
+ encrypt_key.is_rx_seq_valid = true;
+ }
} else {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+ return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
@@ -1391,7 +1419,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
* with requisite parameters and calls the IOCTL handler.
*/
int
-mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
+mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
{
struct mwifiex_ds_misc_gen_ie gen_ie;
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 4651d676df38..b6aa958bd6e4 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -88,11 +88,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct rxpd *local_rx_pd;
int hdr_chop;
struct ethhdr *eth;
+ u16 rx_pkt_off, rx_pkt_len;
+ u8 *offset;
local_rx_pd = (struct rxpd *) (skb->data);
- rx_pkt_hdr = (void *)local_rx_pd +
- le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_off = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
@@ -142,6 +145,12 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return 0;
}
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ ntohs(rx_pkt_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) {
+ offset = (u8 *)local_rx_pd + rx_pkt_off;
+ mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
+ }
+
priv->rxpd_rate = local_rx_pd->rx_rate;
priv->rxpd_htinfo = local_rx_pd->ht_info;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 354d64c9606f..1236a5de7bca 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -95,6 +95,9 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
}
}
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
/* Offset of actual data */
pkt_offset = sizeof(struct txpd) + pad;
if (pkt_type == PKT_TYPE_MGMT) {
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
new file mode 100644
index 000000000000..5efd456af571
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -0,0 +1,1044 @@
+/* Marvell Wireless LAN device driver: TDLS handling
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "wmm.h"
+#include "11n.h"
+#include "11n_rxreorder.h"
+#include "11ac.h"
+
+#define TDLS_REQ_FIX_LEN 6
+#define TDLS_RESP_FIX_LEN 8
+#define TDLS_CONFIRM_FIX_LEN 6
+
+static void
+mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *tid_list;
+ struct sk_buff *skb, *tmp;
+ struct mwifiex_txinfo *tx_info;
+ unsigned long flags;
+ u32 tid;
+ u8 tid_down;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+
+ __skb_unlink(skb, &priv->tdls_txq);
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tid = skb->priority;
+ tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+
+ if (status == TDLS_SETUP_COMPLETE) {
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
+ ra_list->tdls_link = true;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ } else {
+ tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(tid_list))
+ ra_list = list_first_entry(tid_list,
+ struct mwifiex_ra_list_tbl, list);
+ else
+ ra_list = NULL;
+ tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
+ }
+
+ if (!ra_list) {
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ continue;
+ }
+
+ skb_queue_tail(&ra_list->skb_head, skb);
+
+ ra_list->ba_pkt_count++;
+ ra_list->total_pkt_count++;
+
+ if (atomic_read(&priv->wmm.highest_queued_prio) <
+ tos_to_tid_inv[tid_down])
+ atomic_set(&priv->wmm.highest_queued_prio,
+ tos_to_tid_inv[tid_down]);
+
+ atomic_inc(&priv->wmm.tx_pkts_queued);
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
+
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *ra_list_head;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ int i;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; i++) {
+ if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
+ ra_list_head = &priv->wmm.tid_tbl_ptr[i].ra_list;
+ list_for_each_entry(ra_list, ra_list_head, list) {
+ skb_queue_walk_safe(&ra_list->skb_head, skb,
+ tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+ __skb_unlink(skb, &ra_list->skb_head);
+ atomic_dec(&priv->wmm.tx_pkts_queued);
+ ra_list->total_pkt_count--;
+ skb_queue_tail(&priv->tdls_txq, skb);
+ }
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
+
+/* This function appends supported-rates IEs to the given TDLS frame. */
+static int
+mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ u8 rates[MWIFIEX_SUPPORTED_RATES], *pos;
+ u16 rates_size, supp_rates_size, ext_rates_size;
+
+ memset(rates, 0, sizeof(rates));
+ rates_size = mwifiex_get_supported_rates(priv, rates);
+
+ supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
+
+ if (skb_tailroom(skb) < rates_size + 4) {
+ dev_err(priv->adapter->dev,
+ "Insuffient space while adding rates\n");
+ return -ENOMEM;
+ }
+
+ pos = skb_put(skb, supp_rates_size + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = supp_rates_size;
+ memcpy(pos, rates, supp_rates_size);
+
+ if (rates_size > MWIFIEX_TDLS_SUPPORTED_RATES) {
+ ext_rates_size = rates_size - MWIFIEX_TDLS_SUPPORTED_RATES;
+ pos = skb_put(skb, ext_rates_size + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = ext_rates_size;
+ memcpy(pos, rates + MWIFIEX_TDLS_SUPPORTED_RATES,
+ ext_rates_size);
+ }
+
+ return 0;
+}
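
For illustration, a minimal standalone sketch (plain C, not driver code) of the split performed above: the first eight rates go into a Supported Rates element and any remainder into an Extended Supported Rates element. The element IDs follow the 802.11 convention; the buffer, rate values and the MAX_BASIC_RATES name are hypothetical stand-ins for MWIFIEX_TDLS_SUPPORTED_RATES.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EID_SUPP_RATES      1   /* WLAN_EID_SUPP_RATES */
#define EID_EXT_SUPP_RATES 50   /* WLAN_EID_EXT_SUPP_RATES */
#define MAX_BASIC_RATES     8   /* stand-in for MWIFIEX_TDLS_SUPPORTED_RATES */

/* Append Supported Rates (+ Extended Supported Rates if needed) as TLVs. */
static size_t append_rates(uint8_t *buf, const uint8_t *rates, size_t n)
{
	size_t basic = n < MAX_BASIC_RATES ? n : MAX_BASIC_RATES;
	uint8_t *pos = buf;

	*pos++ = EID_SUPP_RATES;
	*pos++ = (uint8_t)basic;
	memcpy(pos, rates, basic);
	pos += basic;

	if (n > basic) {
		*pos++ = EID_EXT_SUPP_RATES;
		*pos++ = (uint8_t)(n - basic);
		memcpy(pos, rates + basic, n - basic);
		pos += n - basic;
	}
	return pos - buf;
}

int main(void)
{
	/* 12 hypothetical rate codes: 8 land in the basic IE, 4 overflow. */
	uint8_t rates[12] = { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24,
			      0x30, 0x48, 0x60, 0x6c };
	uint8_t buf[64];
	size_t len = append_rates(buf, rates, sizeof(rates));

	printf("emitted %zu bytes: 2 headers + 12 rate octets\n", len);
	return 0;
}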
+
+static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_assoc_rsp *assoc_rsp;
+ u8 *pos;
+
+ assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
+ pos = (void *)skb_put(skb, 4);
+ *pos++ = WLAN_EID_AID;
+ *pos++ = 2;
+ *pos++ = le16_to_cpu(assoc_rsp->a_id);
+
+ return;
+}
+
+static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_vht_cap vht_cap;
+ u8 *pos;
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+ *pos++ = WLAN_EID_VHT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_vht_cap);
+
+ memset(&vht_cap, 0, sizeof(struct ieee80211_vht_cap));
+
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap, priv->curr_bss_params.band);
+ memcpy(pos, &vht_cap, sizeof(struct ieee80211_vht_cap));
+
+ return 0;
+}
+
+static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
+ u8 *mac, struct sk_buff *skb)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+ struct ieee80211_vht_cap *vht_cap, *ap_vht_cap = NULL;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 supp_chwd_set, peer_supp_chwd_set;
+ u8 *pos, ap_supp_chwd_set, chan_bw;
+ u16 mcs_map_user, mcs_map_resp, mcs_map_result;
+ u16 mcs_user, mcs_resp, nss;
+ u32 usr_vht_cap_info;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (unlikely(!sta_ptr)) {
+ dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+ return -1;
+ }
+
+ if (!mwifiex_is_bss_in_11ac_mode(priv)) {
+ if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ dev_dbg(adapter->dev,
+ "TDLS peer doesn't support wider bandwitdh\n");
+ return 0;
+ }
+ } else {
+ ap_vht_cap = bss_desc->bcn_vht_cap;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
+ *pos++ = WLAN_EID_VHT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_vht_operation);
+ vht_oper = (struct ieee80211_vht_operation *)pos;
+
+ if (bss_desc->bss_band & BAND_A)
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
+ else
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
+
+ /* find the minimum bandwidth between the AP and TDLS peers */
+ vht_cap = &sta_ptr->tdls_cap.vhtcap;
+ supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
+ peer_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, peer_supp_chwd_set);
+
+ /* We need to check the AP's bandwidth when TDLS_WIDER_BANDWIDTH is off */
+
+ if (ap_vht_cap && sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ ap_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(ap_vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, ap_supp_chwd_set);
+ }
+
+ switch (supp_chwd_set) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+ break;
+ default:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
+ mcs_map_result = 0;
+
+ for (nss = 1; nss <= 8; nss++) {
+ mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
+ mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
+
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ else
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ min_t(u16, mcs_user, mcs_resp));
+ }
+
+ vht_oper->basic_mcs_set = cpu_to_le16(mcs_map_result);
+
+ switch (vht_oper->chan_width) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+ vht_oper->center_freq_seg1_idx =
+ mwifiex_get_center_freq_index(priv, BAND_AAC,
+ bss_desc->channel,
+ chan_bw);
+
+ return 0;
+}
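
The per-stream minimum computed in the loop above can be sketched in isolation. This is a hedged, standalone example: get_nss_mcs()/set_nss_mcs() are hypothetical stand-ins for the driver's GET_VHTNSSMCS/SET_VHTNSSMCS macros, and it assumes the standard VHT MCS map layout of two bits per spatial stream with the value 3 meaning "not supported".

#include <stdio.h>
#include <stdint.h>

#define MCS_NOT_SUPPORTED 3   /* 2-bit field value, as in IEEE 802.11ac */

/* Extract/insert the 2-bit MCS value for spatial stream nss (1..8). */
static unsigned get_nss_mcs(uint16_t map, unsigned nss)
{
	return (map >> (2 * (nss - 1))) & 0x3;
}

static void set_nss_mcs(uint16_t *map, unsigned nss, unsigned val)
{
	*map &= ~(0x3 << (2 * (nss - 1)));
	*map |= (val & 0x3) << (2 * (nss - 1));
}

int main(void)
{
	/* Hypothetical maps: local side supports 2 streams, peer only 1. */
	uint16_t user = 0xfffa;   /* NSS1=2, NSS2=2, rest not supported */
	uint16_t resp = 0xfffe;   /* NSS1=2, rest not supported */
	uint16_t result = 0;
	unsigned nss;

	for (nss = 1; nss <= 8; nss++) {
		unsigned a = get_nss_mcs(user, nss);
		unsigned b = get_nss_mcs(resp, nss);

		if (a == MCS_NOT_SUPPORTED || b == MCS_NOT_SUPPORTED)
			set_nss_mcs(&result, nss, MCS_NOT_SUPPORTED);
		else
			set_nss_mcs(&result, nss, a < b ? a : b);
	}

	printf("negotiated mcs map: 0x%04x\n", result);   /* 0xfffe */
	return 0;
}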
+
+static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_extcap *extcap;
+
+ extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
+ extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
+ extcap->ieee_hdr.len = 8;
+ memset(extcap->ext_capab, 0, 8);
+ extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+
+ if (priv->adapter->is_hw_11ac_capable)
+ extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
+}
+
+static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 3);
+
+ *pos++ = WLAN_EID_QOS_CAPA;
+ *pos++ = 1;
+ *pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
+}
+
+static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_tdls_data *tf;
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+ memcpy(tf->da, peer, ETH_ALEN);
+ memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
+ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+ skb_put(skb, sizeof(tf->u.setup_req));
+ tf->u.setup_req.dialog_token = dialog_token;
+ tf->u.setup_req.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+ skb_put(skb, sizeof(tf->u.setup_resp));
+ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+ tf->u.setup_resp.dialog_token = dialog_token;
+ tf->u.setup_resp.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+ skb_put(skb, sizeof(tf->u.setup_cfm));
+ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+ tf->u.setup_cfm.dialog_token = dialog_token;
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+ break;
+
+ case WLAN_TDLS_TEARDOWN:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_TEARDOWN;
+ skb_put(skb, sizeof(tf->u.teardown));
+ tf->u.teardown.reason_code = cpu_to_le16(status_code);
+ break;
+
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+ skb_put(skb, sizeof(tf->u.discover_req));
+ tf->u.discover_req.dialog_token = dialog_token;
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+{
+ struct ieee80211_tdls_lnkie *lnkid;
+
+ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+ lnkid->ie_type = WLAN_EID_LINK_ID;
+ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
+ sizeof(struct ieee_types_header);
+
+ memcpy(lnkid->bssid, bssid, ETH_ALEN);
+ memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+ memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ int ret;
+ u16 skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ 3 + /* Qos Info */
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len;
+
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
+ priv->cfg_bssid);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+ break;
+ }
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ skb->priority = MWIFIEX_PRIO_BK;
+ break;
+ default:
+ skb->priority = MWIFIEX_PRIO_VI;
+ break;
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
+
+static int
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));
+
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, peer, ETH_ALEN);
+ memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ /* add address 4 */
+ pos = skb_put(skb, ETH_ALEN);
+
+ switch (action_code) {
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
+ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ mgmt->u.action.u.tdls_discover_resp.action_code =
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+ mgmt->u.action.u.tdls_discover_resp.dialog_token =
+ dialog_token;
+ mgmt->u.action.u.tdls_discover_resp.capability =
+ cpu_to_le16(capab);
+ /* move back for addr4 */
+ memmove(pos + ETH_ALEN, &mgmt->u.action.category,
+ sizeof(mgmt->u.action.u.tdls_discover_resp));
+ /* init address 4 */
+ memcpy(pos, bc_addr, ETH_ALEN);
+
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ u8 *pos;
+ u32 pkt_type, tx_control;
+ u16 pkt_len, skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len +
+ 3 + /* Qos Info */
+ ETH_ALEN; /* Address4 */
+
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ pkt_type = PKT_TYPE_MGMT;
+ tx_control = 0;
+ pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memcpy(pos, &pkt_type, sizeof(pkt_type));
+ memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));
+
+ if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ skb)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+ /* the TDLS link IE is always added last since we are the responder */
+
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+
+ skb->priority = MWIFIEX_PRIO_VI;
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+
+ pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
+ memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
+ sizeof(pkt_len));
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
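
The management-frame path above reserves a descriptor header plus a two-byte length slot up front, builds the frame body, then patches the length back in. A standalone sketch of that reserve-then-patch pattern follows; HDR_SIZE, the packet-type value and the body contents are hypothetical, not the driver's actual constants.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define HDR_SIZE 8   /* stand-in for MWIFIEX_MGMT_FRAME_HEADER_SIZE */

int main(void)
{
	uint8_t buf[64];
	uint32_t pkt_type = 0xe5, tx_control = 0;   /* hypothetical values */
	uint16_t pkt_len;
	size_t off;

	/* 1. Reserve the descriptor header plus the length slot. */
	memset(buf, 0, HDR_SIZE + sizeof(pkt_len));
	memcpy(buf, &pkt_type, sizeof(pkt_type));
	memcpy(buf + sizeof(pkt_type), &tx_control, sizeof(tx_control));
	off = HDR_SIZE + sizeof(pkt_len);

	/* 2. Append the (hypothetical) frame body. */
	memcpy(buf + off, "frame-body", 10);
	off += 10;

	/* 3. Patch the body length back into the reserved slot. */
	pkt_len = (uint16_t)(off - HDR_SIZE - sizeof(pkt_len));
	memcpy(buf + HDR_SIZE, &pkt_len, sizeof(pkt_len));

	printf("total %zu bytes, body %u bytes\n", off, (unsigned)pkt_len);
	return 0;
}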
+
+/* This function processes a TDLS action frame received from a peer.
+ * Peer capabilities are stored in the station node structure.
+ */
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ u8 *peer, *pos, *end;
+ u8 i, action, basic;
+ int ie_len = 0;
+
+ if (len < (sizeof(struct ethhdr) + 3))
+ return;
+ if (*(u8 *)(buf + sizeof(struct ethhdr)) != WLAN_TDLS_SNAP_RFTYPE)
+ return;
+ if (*(u8 *)(buf + sizeof(struct ethhdr) + 1) != WLAN_CATEGORY_TDLS)
+ return;
+
+ peer = buf + ETH_ALEN;
+ action = *(u8 *)(buf + sizeof(struct ethhdr) + 2);
+
+ /* just handle TDLS setup request/response/confirm */
+ if (action > WLAN_TDLS_SETUP_CONFIRM)
+ return;
+
+ dev_dbg(priv->adapter->dev,
+ "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return;
+
+ switch (action) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
+ return;
+
+ pos = buf + sizeof(struct ethhdr) + 4;
+ /* payload 1 + category 1 + action 1 + dialog 1 */
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ if (len < (sizeof(struct ethhdr) + TDLS_RESP_FIX_LEN))
+ return;
+ /* payload 1 + category 1 + action 1 + dialog 1 + status code 2 */
+ pos = buf + sizeof(struct ethhdr) + 6;
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
+ return;
+ pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
+ ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ break;
+ default:
+ dev_warn(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return;
+ }
+
+ for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
+ if (pos + 2 + pos[1] > end)
+ break;
+
+ switch (*pos) {
+ case WLAN_EID_SUPP_RATES:
+ sta_ptr->tdls_cap.rates_len = pos[1];
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+ break;
+
+ case WLAN_EID_EXT_SUPP_RATES:
+ basic = sta_ptr->tdls_cap.rates_len;
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+ sta_ptr->tdls_cap.rates_len += pos[1];
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+ sizeof(struct ieee80211_ht_cap));
+ sta_ptr->is_11n_enabled = 1;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+ sizeof(struct ieee80211_ht_operation));
+ break;
+ case WLAN_EID_BSS_COEX_2040:
+ sta_ptr->tdls_cap.coex_2040 = pos[2];
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+ sizeof(struct ieee_types_header) +
+ min_t(u8, pos[1], 8));
+ break;
+ case WLAN_EID_RSN:
+ memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+ sizeof(struct ieee_types_header) + pos[1]);
+ break;
+ case WLAN_EID_QOS_CAPA:
+ sta_ptr->tdls_cap.qos_info = pos[2];
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (priv->adapter->is_hw_11ac_capable)
+ memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+ sizeof(struct ieee80211_vht_operation));
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (priv->adapter->is_hw_11ac_capable) {
+ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+ sizeof(struct ieee80211_vht_cap));
+ sta_ptr->is_11ac_enabled = 1;
+ }
+ break;
+ case WLAN_EID_AID:
+ if (priv->adapter->is_hw_11ac_capable)
+ sta_ptr->tdls_cap.aid =
+ le16_to_cpu(*(__le16 *)(pos + 2));
+ default:
+ break;
+ }
+ }
+
+ return;
+}
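
The parsing loop above is the usual information-element walk: one byte of element ID, one byte of length, then the payload, with a bounds check so a truncated element stops the scan. A self-contained sketch with hypothetical buffer contents:

#include <stdio.h>
#include <stdint.h>

/* Walk a buffer of 802.11 information elements: 1-byte ID, 1-byte length,
 * then 'length' bytes of payload; stop if an element would overrun the end.
 */
static void walk_ies(const uint8_t *pos, int ie_len)
{
	const uint8_t *end = pos + ie_len;

	for (; pos + 1 < end; pos += 2 + pos[1]) {
		if (pos + 2 + pos[1] > end)
			break;   /* truncated element, stop parsing */
		printf("IE id=%u len=%u\n", pos[0], pos[1]);
	}
}

int main(void)
{
	/* Hypothetical IEs: Supported Rates (id 1, 4 rates) and SSID (id 0). */
	uint8_t buf[] = { 1, 4, 0x02, 0x04, 0x0b, 0x16,
			  0, 3, 'a', 'b', 'c' };

	walk_ies(buf, sizeof(buf));
	return 0;
}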
+
+static int
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
+ dev_err(priv->adapter->dev,
+ "link absent for peer %pM; cannot config\n", peer);
+ return -EINVAL;
+ }
+
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CONFIG_LINK;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper);
+}
+
+static int
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
+ dev_dbg(priv->adapter->dev,
+ "Setup already in progress for peer %pM\n", peer);
+ return 0;
+ }
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return -ENOMEM;
+
+ sta_ptr->tdls_status = TDLS_SETUP_INPROGRESS;
+ mwifiex_hold_tdls_packets(priv, peer);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CREATE_LINK;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper);
+}
+
+static int
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr) {
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+ mwifiex_del_sta_entry(priv, peer);
+ }
+
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper);
+}
+
+static int
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct ieee80211_mcs_info mcs;
+ unsigned long flags;
+ int i;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM success\n", peer);
+
+ sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
+
+ mcs = sta_ptr->tdls_cap.ht_capb.mcs;
+ if (mcs.rx_mask[0] != 0xff)
+ sta_ptr->is_11n_enabled = true;
+ if (sta_ptr->is_11n_enabled) {
+ if (le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU)
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_8K;
+ else
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ } else {
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+ }
+
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM failed\n", peer);
+ if (sta_ptr) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_del_sta_entry(priv, peer);
+ }
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+{
+ switch (action) {
+ case MWIFIEX_TDLS_ENABLE_LINK:
+ return mwifiex_tdls_process_enable_link(priv, peer);
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ return mwifiex_tdls_process_disable_link(priv, peer);
+ case MWIFIEX_TDLS_CREATE_LINK:
+ return mwifiex_tdls_process_create_link(priv, peer);
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ return mwifiex_tdls_process_config_link(priv, peer);
+ }
+ return 0;
+}
+
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (sta_ptr)
+ return sta_ptr->tdls_status;
+
+ return TDLS_NOT_SETUP;
+}
+
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ if (list_empty(&priv->sta_list))
+ return;
+
+ list_for_each_entry(sta_ptr, &priv->sta_list, list) {
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+
+ mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
+ TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, sta_ptr->mac_addr, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper))
+ dev_warn(priv->adapter->dev,
+ "Disable link failed for TDLS peer %pM",
+ sta_ptr->mac_addr);
+ }
+
+ mwifiex_del_all_sta_list(priv);
+}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 718066577c6c..2d47ba70225c 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -21,126 +21,8 @@
#include "main.h"
#include "11n.h"
-/*
- * This function will return the pointer to station entry in station list
- * table which matches specified mac address.
- * This function should be called after acquiring RA list spinlock.
- * NULL is returned if station entry is not found in associated STA list.
- */
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
-
- if (!mac)
- return NULL;
-
- list_for_each_entry(node, &priv->sta_list, list) {
- if (!memcmp(node->mac_addr, mac, ETH_ALEN))
- return node;
- }
- return NULL;
-}
-
-/*
- * This function will add a sta_node entry to associated station list
- * table with the given mac address.
- * If entry exist already, existing entry is returned.
- * If received mac address is NULL, NULL is returned.
- */
-static struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
- if (!mac)
- return NULL;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- node = mwifiex_get_sta_entry(priv, mac);
- if (node)
- goto done;
-
- node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
- if (!node)
- goto done;
-
- memcpy(node->mac_addr, mac, ETH_ALEN);
- list_add_tail(&node->list, &priv->sta_list);
-
-done:
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return node;
-}
-
-/*
- * This function will search for HT IE in association request IEs
- * and set station HT parameters accordingly.
- */
-static void
-mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
- int ies_len, struct mwifiex_sta_node *node)
-{
- const struct ieee80211_ht_cap *ht_cap;
-
- if (!ies)
- return;
-
- ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
- if (ht_cap) {
- node->is_11n_enabled = 1;
- node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
- IEEE80211_HT_CAP_MAX_AMSDU ?
- MWIFIEX_TX_DATA_BUF_SIZE_8K :
- MWIFIEX_TX_DATA_BUF_SIZE_4K;
- } else {
- node->is_11n_enabled = 0;
- }
-
- return;
-}
-
-/*
- * This function will delete a station entry from station list
- */
-static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- node = mwifiex_get_sta_entry(priv, mac);
- if (node) {
- list_del(&node->list);
- kfree(node);
- }
-
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
-
-/*
- * This function will delete all stations from associated station list.
- */
-static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
-{
- struct mwifiex_sta_node *node, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
- list_del(&node->list);
- kfree(node);
- }
-
- INIT_LIST_HEAD(&priv->sta_list);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
/*
* This function handles AP interface specific events generated by firmware.
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 9b82e225880c..8d37bfc578bd 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -252,3 +252,117 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
return 0;
}
+
+/* This function will return a pointer to the station entry in the station
+ * list that matches the specified mac address.
+ * This function should be called after acquiring the RA list spinlock.
+ * NULL is returned if the station entry is not found in the STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+
+ if (!mac)
+ return NULL;
+
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+ return node;
+ }
+
+ return NULL;
+}
+
+/* This function will add a sta_node entry to the associated station list
+ * with the given mac address.
+ * If the entry already exists, the existing entry is returned.
+ * If the received mac address is NULL, NULL is returned.
+ */
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ if (!mac)
+ return NULL;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node)
+ goto done;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node)
+ goto done;
+
+ memcpy(node->mac_addr, mac, ETH_ALEN);
+ list_add_tail(&node->list, &priv->sta_list);
+
+done:
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return node;
+}
+
+/* This function will search for HT IE in association request IEs
+ * and set station HT parameters accordingly.
+ */
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node)
+{
+ const struct ieee80211_ht_cap *ht_cap;
+
+ if (!ies)
+ return;
+
+ ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+ if (ht_cap) {
+ node->is_11n_enabled = 1;
+ node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ } else {
+ node->is_11n_enabled = 0;
+ }
+
+ return;
+}
+
+/* This function will delete a station entry from station list */
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
+
+/* This function will delete all stations from associated station list. */
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *node, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ INIT_LIST_HEAD(&priv->sta_list);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index cb2d0582bd36..ddae57021397 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -30,8 +30,24 @@ static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
}
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa)
+struct mwifiex_dma_mapping {
+ dma_addr_t addr;
+ size_t len;
+};
+
+static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
+ struct mwifiex_dma_mapping *mapping)
{
- memcpy(buf_pa, skb->cb, sizeof(dma_addr_t));
+ memcpy(mapping, skb->cb, sizeof(*mapping));
}
+
+static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
+{
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+
+ return mapping.addr;
+}
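
Only the getters are visible in this hunk; the corresponding setter presumably memcpy()s a struct mwifiex_dma_mapping into skb->cb before the descriptor is handed to hardware. A standalone sketch of that round trip follows, with a stand-in control block and address type instead of the real sk_buff and dma_addr_t.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Stand-ins for skb->cb and dma_addr_t, just to show the round trip. */
struct fake_skb { unsigned char cb[48]; };
typedef uint64_t fake_dma_addr_t;

struct dma_mapping {
	fake_dma_addr_t addr;
	size_t len;
};

/* Store the mapping in the control block... */
static void set_mapping(struct fake_skb *skb, fake_dma_addr_t addr, size_t len)
{
	struct dma_mapping m = { .addr = addr, .len = len };

	memcpy(skb->cb, &m, sizeof(m));
}

/* ...and read it back, as MWIFIEX_SKB_PACB/MWIFIEX_SKB_DMA_ADDR do. */
static fake_dma_addr_t get_addr(const struct fake_skb *skb)
{
	struct dma_mapping m;

	memcpy(&m, skb->cb, sizeof(m));
	return m.addr;
}

int main(void)
{
	struct fake_skb skb;

	set_mapping(&skb, 0x12345678, 1500);
	printf("addr back from cb: 0x%llx\n",
	       (unsigned long long)get_addr(&skb));
	return 0;
}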
+
#endif /* !_MWIFIEX_UTIL_H_ */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 13eaeed03898..e0ba0115e5ae 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -64,21 +64,6 @@ static u8 tos_to_tid[] = {
0x07 /* 1 1 1 AC_VO */
};
-/*
- * This table inverses the tos_to_tid operation to get a priority
- * which is in sequential order, and can be compared.
- * Use this to compare the priority of two different TIDs.
- */
-static u8 tos_to_tid_inv[] = {
- 0x02, /* from tos_to_tid[2] = 0 */
- 0x00, /* from tos_to_tid[0] = 1 */
- 0x01, /* from tos_to_tid[1] = 2 */
- 0x03,
- 0x04,
- 0x05,
- 0x06,
- 0x07};
-
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
/*
@@ -175,8 +160,15 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
break;
ra_list->is_11n_enabled = 0;
+ ra_list->tdls_link = false;
if (!mwifiex_queuing_ra_based(priv)) {
- ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ if (mwifiex_get_tdls_link_status(priv, ra) ==
+ TDLS_SETUP_COMPLETE) {
+ ra_list->is_11n_enabled =
+ mwifiex_tdls_peer_11n_enabled(priv, ra);
+ } else {
+ ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ }
} else {
ra_list->is_11n_enabled =
mwifiex_is_sta_11n_enabled(priv, node);
@@ -213,8 +205,9 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
* This function map ACs to TIDs.
*/
static void
-mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
+mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
+ struct mwifiex_wmm_desc *wmm = &priv->wmm;
u8 *queue_priority = wmm->queue_priority;
int i;
@@ -224,7 +217,7 @@ mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
}
for (i = 0; i < MAX_NUM_TID; ++i)
- tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
+ priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
@@ -285,7 +278,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
}
}
- mwifiex_wmm_queue_priorities_tid(&priv->wmm);
+ mwifiex_wmm_queue_priorities_tid(priv);
}
/*
@@ -388,8 +381,7 @@ mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
* AP is disabled (due to call admission control (ACM bit). Mapping
* of TID to AC is taken care of internally.
*/
-static u8
-mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
enum mwifiex_wmm_ac_e ac, ac_down;
u8 new_tid;
@@ -421,9 +413,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
continue;
for (i = 0; i < MAX_NUM_TID; ++i) {
- priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].amsdu = priv->tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].ampdu_ap =
+ priv->tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].ampdu_user =
+ priv->tos_to_tid_inv[i];
}
priv->aggr_prio_tbl[6].amsdu
@@ -546,6 +540,7 @@ void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
unsigned long flags;
+ struct sk_buff *skb, *tmp;
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
@@ -562,6 +557,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
if (priv->adapter->if_ops.clean_pcie_ring)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
/*
@@ -590,7 +588,7 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
* If no such node is found, a new node is added first and then
* retrieved.
*/
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -640,6 +638,21 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
unsigned long flags;
+ struct list_head *list_head;
+ int tdls_status = TDLS_NOT_SETUP;
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+
+ memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
+ if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
+ dev_dbg(adapter->dev,
+ "TDLS setup packet for %pM. Don't block\n", ra);
+ else
+ tdls_status = mwifiex_get_tdls_link_status(priv, ra);
+ }
if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
@@ -658,12 +671,27 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
have only 1 raptr for a tid in case of infra */
if (!mwifiex_queuing_ra_based(priv) &&
!mwifiex_is_skb_mgmt_frame(skb)) {
- if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
- ra_list = list_first_entry(
- &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
- struct mwifiex_ra_list_tbl, list);
- else
- ra_list = NULL;
+ switch (tdls_status) {
+ case TDLS_SETUP_COMPLETE:
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
+ ra);
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ break;
+ case TDLS_SETUP_INPROGRESS:
+ skb_queue_tail(&priv->tdls_txq, skb);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ return;
+ default:
+ list_head = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(list_head))
+ ra_list = list_first_entry(
+ list_head, struct mwifiex_ra_list_tbl,
+ list);
+ else
+ ra_list = NULL;
+ break;
+ }
} else {
memcpy(ra, skb->data, ETH_ALEN);
if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
@@ -683,9 +711,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
ra_list->total_pkt_count++;
if (atomic_read(&priv->wmm.highest_queued_prio) <
- tos_to_tid_inv[tid_down])
+ priv->tos_to_tid_inv[tid_down])
atomic_set(&priv->wmm.highest_queued_prio,
- tos_to_tid_inv[tid_down]);
+ priv->tos_to_tid_inv[tid_down]);
atomic_inc(&priv->wmm.tx_pkts_queued);
@@ -1226,7 +1254,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
/* ra_list_spinlock has been freed in
mwifiex_send_single_packet() */
} else {
- if (mwifiex_is_ampdu_allowed(priv, tid) &&
+ if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
ptr->ba_pkt_count > ptr->ba_packet_thr) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
mwifiex_create_ba_tbl(priv, ptr->ra, tid,
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 0f129d498fb1..83e42083ebff 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -34,6 +34,21 @@ enum ieee_types_wmm_ecw_bitmasks {
static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
/*
+ * This table inverses the tos_to_tid operation to get a priority
+ * which is in sequential order, and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+static const u8 tos_to_tid_inv[] = {
+ 0x02, /* from tos_to_tid[2] = 0 */
+ 0x00, /* from tos_to_tid[0] = 1 */
+ 0x01, /* from tos_to_tid[1] = 2 */
+ 0x03,
+ 0x04,
+ 0x05,
+ 0x06,
+ 0x07};
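
A short worked example of the comparison this table enables: indexing by TID yields a sequential priority, so two TIDs can be compared directly. The helper below is a hypothetical illustration, not a driver function.

#include <stdio.h>
#include <stdint.h>

/* Same inverse table as above: index = TID, value = sequential priority. */
static const uint8_t tos_to_tid_inv[] = {
	0x02, 0x00, 0x01, 0x03, 0x04, 0x05, 0x06, 0x07
};

/* Returns the TID with the higher WMM priority of the two. */
static unsigned higher_prio_tid(unsigned tid_a, unsigned tid_b)
{
	return tos_to_tid_inv[tid_a] >= tos_to_tid_inv[tid_b] ? tid_a : tid_b;
}

int main(void)
{
	/* TID 1 (background) vs TID 0 (best effort): best effort wins. */
	printf("higher priority TID: %u\n", higher_prio_tid(1, 0));  /* 0 */
	/* TID 5 (video) vs TID 7 (voice): voice wins. */
	printf("higher priority TID: %u\n", higher_prio_tid(5, 7));  /* 7 */
	return 0;
}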
+
+/*
* This function retrieves the TID of the given RA list.
*/
static inline int
@@ -107,5 +122,8 @@ void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
const struct host_cmd_ds_command *resp);
+struct mwifiex_ra_list_tbl *
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 5028557aa18a..2e89a865a67d 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2835,7 +2835,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
- cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(usbdev->net, bssid,
+ get_current_channel(usbdev, NULL),
+ GFP_KERNEL);
kfree(info);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index caddc1b427a9..14a90ddf585c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -125,9 +125,9 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
if (unlikely(tout))
- rt2x00_warn(entry->queue->rt2x00dev,
- "TX status timeout for entry %d in queue %d\n",
- entry->entry_idx, entry->queue->qid);
+ rt2x00_dbg(entry->queue->rt2x00dev,
+ "TX status timeout for entry %d in queue %d\n",
+ entry->entry_idx, entry->queue->qid);
return tout;
}
@@ -566,8 +566,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(rt2x00queue_empty(queue))) {
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
+ rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
break;
}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 3867d1470b36..7980ab1f9eca 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -619,13 +619,13 @@ static int rtl8180_start(struct ieee80211_hw *dev)
if (priv->r8185) {
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg |= RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index fd78df813a85..c981bcfb6cef 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -785,7 +785,7 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
/* Auto Rate Fallback Register (ARFR): 1M-54M setting */
@@ -943,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -986,13 +986,13 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 56aee067f324..a6ad79f61bf9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
#ifndef RTL8187_H
#define RTL8187_H
+#include <linux/cache.h>
+
#include "rtl818x.h"
#include "leds.h"
@@ -139,7 +141,10 @@ struct rtl8187_priv {
u8 aifsn[4];
u8 rfkill_mask;
struct {
- __le64 buf;
+ union {
+ __le64 buf;
+ u8 dummy1[L1_CACHE_BYTES];
+ } ____cacheline_aligned;
struct sk_buff_head queue;
} b_tx_status; /* This queue is used by both -b and non-b devices */
struct mutex io_mutex;
@@ -147,7 +152,8 @@ struct rtl8187_priv {
u8 bits8;
__le16 bits16;
__le32 bits32;
- } *io_dmabuf;
+ u8 dummy2[L1_CACHE_BYTES];
+ } *io_dmabuf ____cacheline_aligned;
bool rfkill_off;
u16 seqno;
};
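
Editorial note: the two rtl8187.h hunks above pad and align the small DMA targets (b_tx_status.buf and *io_dmabuf) out to a full cache line with ____cacheline_aligned and L1_CACHE_BYTES, so they never share a line with unrelated fields that the CPU may be writing while the USB/DMA transfer is in flight. A minimal userspace sketch of the same padding idea, assuming a 64-byte line; the constant and struct names are illustrative, not from the driver:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64	/* assumed here; the kernel uses L1_CACHE_BYTES */

struct dma_status {
	/*
	 * Pad the 8-byte DMA target out to a whole cache line and align it,
	 * so cache maintenance on the DMA buffer can never clobber refcount.
	 */
	union {
		uint64_t buf;			/* the device DMAs into this */
		uint8_t pad[CACHELINE_BYTES];
	} dma __attribute__((aligned(CACHELINE_BYTES)));
	int refcount;				/* CPU-only bookkeeping */
};

int main(void)
{
	struct dma_status s = { .dma = { .buf = 0 } };

	printf("union size %zu, refcount offset %zu\n",
	       sizeof(s.dma), offsetof(struct dma_status, refcount));
	return 0;
}

The kernel variant achieves the same effect with ____cacheline_aligned, so the padding automatically tracks the real line size of the target CPU.
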
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index ce23dfd42381..fa7f7f61ea26 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -144,9 +144,9 @@ struct rtl818x_csr {
__le32 HSSI_PARA;
u8 reserved_13[4];
u8 TX_AGC_CTL;
-#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT (1 << 0)
-#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT (1 << 1)
-#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
+#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN (1 << 0)
+#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL (1 << 1)
+#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
u8 TX_GAIN_CCK;
u8 TX_GAIN_OFDM;
u8 TX_ANTENNA;
@@ -158,8 +158,8 @@ struct rtl818x_csr {
u8 SLOT;
u8 reserved_16[5];
u8 CW_CONF;
-#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT (1 << 0)
-#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT (1 << 1)
+#define RTL818X_CW_CONF_PERPACKET_CW (1 << 0)
+#define RTL818X_CW_CONF_PERPACKET_RETRY (1 << 1)
u8 CW_VAL;
u8 RATE_FALLBACK;
#define RTL818X_RATE_FALLBACK_ENABLE (1 << 7)
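
Editorial note: the rtl818x.h rename above is cosmetic; the values were always single-bit masks of the form (1 << n), and the old _SHIFT suffix wrongly suggested they were shift counts, which is what the driver hunks earlier in this patch clean up. A short illustrative snippet (names invented, not from the driver) of why the distinction matters:

#include <stdint.h>
#include <stdio.h>

#define FEATURE_EN_SHIFT 2				/* a shift count: which bit */
#define FEATURE_EN_MASK  (1u << FEATURE_EN_SHIFT)	/* the ready-made mask */

int main(void)
{
	uint8_t reg = 0;

	reg |= FEATURE_EN_MASK;			/* correct: set bit 2 directly */
	printf("set via mask:  0x%02x\n", reg);

	reg = 0;
	reg |= 1u << FEATURE_EN_SHIFT;		/* equivalent, using the shift count */
	printf("set via shift: 0x%02x\n", reg);

	/*
	 * The old rtl818x names mixed the two conventions: a *_SHIFT macro
	 * that already expanded to (1 << n). Treating it as a shift count,
	 * e.g. reg |= 1u << FEATURE_EN_MASK, would set the wrong bit.
	 */
	return 0;
}
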
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index deedae3c5449..d1c0191a195b 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
- return 1;
+ return false;
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
/*<3> Enable Interrupt */
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index a98acefb8c06..ee28a1a3d010 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -260,8 +260,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
kfree(rate_priv);
}
-static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
+static const struct rate_control_ops rtl_rate_ops = {
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index aece6c9cccf1..27ace3054d56 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -452,7 +452,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/* During testing, hdr was NULL */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a82b30a1996c..2eb0b38384dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
bool is92c;
int err;
u8 tmp_u1b;
+ unsigned long flags;
rtlpci->being_init_adapter = true;
+
+ /* Since this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+	 * It is safe to do so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
err = rtl92c_download_fw(hw);
@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- return err;
+ goto exit;
}
rtlhal->last_hmeboxnum = 0;
@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl92c_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpci->being_init_adapter = false;
return err;
}
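
Editorial note: the rtl92ce_hw_init() change above follows a common kernel pattern: snapshot the interrupt flags, re-enable interrupts around a long section that may have been entered with them disabled, and funnel every return path through one label that restores the original state. A condensed kernel-style sketch of that shape; the helper names are placeholders, not driver code:

#include <linux/irqflags.h>

/* Stand-ins for the real MAC/firmware init steps; only the IRQ pattern matters. */
static void long_running_config(void) { }
static int load_firmware(void) { return 0; }

static int slow_hw_init(void)
{
	unsigned long flags;
	int err = 0;

	/* Remember the caller's IRQ state, then enable interrupts so other
	 * devices keep being serviced during the hundreds of ms spent here. */
	local_save_flags(flags);
	local_irq_enable();

	long_running_config();

	err = load_firmware();
	if (err)
		goto exit;		/* every error path still restores IRQs */

	long_running_config();		/* more slow configuration */

exit:
	local_irq_restore(flags);	/* back to whatever the caller had */
	return err;
}
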
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52abf0a862fa..114858d46158 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -393,7 +393,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/* In testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 27efbcdac6a9..163a681962c6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
/* during testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index 50b7be3f3a60..721162cacc3a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -334,7 +334,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/* during testing, hdr could be NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index be7129ba16ad..d50dfac91631 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1378,7 +1378,7 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
{
- if (wl->fw_status_1->tx_results_counter ==
+ if (wl->fw_status->tx_results_counter ==
(wl->tx_results_count & 0xff))
return 0;
@@ -1438,6 +1438,37 @@ out:
return ret;
}
+static void wl12xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl12xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+}
+
static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
@@ -1677,6 +1708,7 @@ static struct wlcore_ops wl12xx_ops = {
.tx_delayed_compl = wl12xx_tx_delayed_compl,
.hw_init = wl12xx_hw_init,
.init_vif = NULL,
+ .convert_fw_status = wl12xx_convert_fw_status,
.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
.get_pg_ver = wl12xx_get_pg_ver,
.get_mac = wl12xx_get_mac,
@@ -1711,22 +1743,53 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
},
};
+static const struct ieee80211_iface_limit wl12xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl12xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl12xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl12xx_iface_limits),
+ .num_different_channels = 1,
+ },
+};
+
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
+ BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
+
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 1;
+ wl->num_links = WL12XX_MAX_LINKS;
+ wl->max_ap_stations = WL12XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl12xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl12xx_iface_combinations);
wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl12xx_fw_status);
wl->fw_status_priv_len = 0;
wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
+ wl->ofdm_only_ap = true;
wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
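
Editorial note: wl12xx_convert_fw_status() above is the wl12xx half of a new abstraction: each chip family reads its own packed, little-endian firmware status layout and converts it into a single host-order struct wl_fw_status that the core consumes. A self-contained sketch of that conversion idea, with struct layouts invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* On-wire layout: packed, little-endian, exactly as the firmware writes it. */
struct raw_status {
	uint8_t intr_le[4];	/* 32-bit little-endian value as raw bytes */
	uint8_t rx_counter;
} __attribute__((packed));

/* Host-side view: native integers, laid out however is convenient. */
struct host_status {
	uint32_t intr;
	uint8_t rx_counter;
};

static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static void convert_status(const struct raw_status *raw,
			   struct host_status *out)
{
	out->intr = le32_to_host(raw->intr_le);	/* byte-swap if the host is BE */
	out->rx_counter = raw->rx_counter;	/* single bytes need no swap */
}

int main(void)
{
	struct raw_status raw = { .intr_le = { 1, 0, 0, 0 }, .rx_counter = 7 };
	struct host_status st;

	convert_status(&raw, &st);
	printf("intr=%u rx=%u\n", st.intr, st.rx_counter);
	return 0;
}

In the driver the same role is played by le32_to_cpu() on the __le32 fields: the per-chip wire layouts live in wl12xx_fw_status and wl18xx_fw_status, while struct wl_fw_status is the shared host view.
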
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 9e5484a73667..75c92658bfea 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -65,6 +65,9 @@
#define WL12XX_RX_BA_MAX_SESSIONS 3
+#define WL12XX_MAX_AP_STATIONS 8
+#define WL12XX_MAX_LINKS 12
+
struct wl127x_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
@@ -79,4 +82,54 @@ struct wl12xx_priv {
struct wl127x_rx_mem_pool_addr *rx_mem_addr;
};
+struct wl12xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl12xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL12XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl12xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+} __packed;
+
#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ec37b16585df..de5b4fa5d166 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -648,7 +648,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
};
/* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
@@ -1133,6 +1133,39 @@ static int wl18xx_hw_init(struct wl1271 *wl)
return ret;
}
+static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl18xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+
+ fw_status->priv = &int_fw_status->priv;
+}
+
static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
@@ -1572,7 +1605,7 @@ static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
/* suspended links are never high priority */
@@ -1594,7 +1627,7 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
@@ -1632,6 +1665,7 @@ static struct wlcore_ops wl18xx_ops = {
.tx_immediate_compl = wl18xx_tx_immediate_completion,
.tx_delayed_compl = NULL,
.hw_init = wl18xx_hw_init,
+ .convert_fw_status = wl18xx_convert_fw_status,
.set_tx_desc_csum = wl18xx_set_tx_desc_csum,
.get_pg_ver = wl18xx_get_pg_ver,
.set_rx_csum = wl18xx_set_rx_csum,
@@ -1713,19 +1747,62 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
},
};
+static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl18xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl18xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_limits),
+ .num_different_channels = 2,
+ },
+ {
+ .max_interfaces = 2,
+ .limits = wl18xx_iface_ap_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
+ .num_different_channels = 1,
+ }
+};
+
static int wl18xx_setup(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
int ret;
+ BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS);
+
wl->rtable = wl18xx_rtable;
wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 2;
+ wl->num_links = WL18XX_MAX_LINKS;
+ wl->max_ap_stations = WL18XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl18xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl18xx_iface_combinations);
wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl18xx_fw_status);
wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 57c694396647..be1ebd55ac88 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -32,7 +32,7 @@ static
void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
struct ieee80211_tx_rate *rate)
{
- u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+ u8 fw_rate = wl->fw_status->counters.tx_last_rate;
if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
wl1271_error("last Tx rate invalid: %d", fw_rate);
@@ -139,7 +139,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
struct wl18xx_priv *priv = wl->priv;
u8 i;
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 9204e07ee432..eb7cfe817010 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
-#define WL18XX_IFTYPE_VER 5
+#define WL18XX_IFTYPE_VER 8
#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
-#define WL18XX_MINOR_VER 39
+#define WL18XX_MINOR_VER 13
#define WL18XX_CMD_MAX_SIZE 740
@@ -40,7 +40,10 @@
#define WL18XX_NUM_MAC_ADDRESSES 3
-#define WL18XX_RX_BA_MAX_SESSIONS 5
+#define WL18XX_RX_BA_MAX_SESSIONS 13
+
+#define WL18XX_MAX_AP_STATIONS 10
+#define WL18XX_MAX_LINKS 16
struct wl18xx_priv {
/* buffer for sending commands to FW */
@@ -109,6 +112,59 @@ struct wl18xx_fw_status_priv {
u8 padding[3];
};
+struct wl18xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl18xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl18xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+
+ /* Private status to be used by the lower drivers */
+ struct wl18xx_fw_status_priv priv;
+} __packed;
+
#define WL18XX_PHY_VERSION_MAX_LEN 20
struct wl18xx_static_data_priv {
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index ec83675a2446..b924ceadc02c 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -358,7 +358,8 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
- wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
+ wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d",
+ enable_filter);
if (enable_filter &&
wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
@@ -1591,7 +1592,8 @@ out:
return ret;
}
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr)
{
struct wl1271_acx_inconnection_sta *acx = NULL;
int ret;
@@ -1603,6 +1605,7 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
return -ENOMEM;
memcpy(acx->addr, addr, ETH_ALEN);
+ acx->role_id = wlvif->role_id;
ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
acx, sizeof(*acx));
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 6dcfad9b0472..954d57ec98f4 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -824,7 +824,8 @@ struct wl1271_acx_inconnection_sta {
struct acx_header header;
u8 addr[ETH_ALEN];
- u8 padding1[2];
+ u8 role_id;
+ u8 padding;
} __packed;
/*
@@ -1118,7 +1119,8 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable);
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 9b2ecf52449f..40dc30f4faaa 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,8 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
u16 status;
u16 poll_count = 0;
- if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
- id != CMD_STOP_FWLOGGER))
+ if (unlikely(wl->state == WLCORE_STATE_RESTARTING &&
+ id != CMD_STOP_FWLOGGER))
return -EIO;
cmd = buf;
@@ -312,8 +312,8 @@ static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
- u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
- if (link >= WL12XX_MAX_LINKS)
+ u8 link = find_first_zero_bit(wl->links_map, wl->num_links);
+ if (link >= wl->num_links)
return -EBUSY;
wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
@@ -324,9 +324,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
__set_bit(link, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- /* take the last "freed packets" value from the current FW status */
- wl->links[link].prev_freed_pkts =
- wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+ /*
+	 * Take the last "freed packets" value from the current FW status.
+	 * On recovery we might not have a FW status yet, so
+	 * tx_lnk_free_pkts will be NULL; check for it first.
+ */
+ if (wl->fw_status->counters.tx_lnk_free_pkts)
+ wl->links[link].prev_freed_pkts =
+ wl->fw_status->counters.tx_lnk_free_pkts[link];
wl->links[link].wlvif = wlvif;
/*
@@ -1527,6 +1532,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->sp_len = sta->max_sp;
cmd->wmm = sta->wme ? 1 : 0;
cmd->session_id = wl->session_ids[hlid];
+ cmd->role_id = wlvif->role_id;
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1563,7 +1569,8 @@ out:
return ret;
}
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid)
{
struct wl12xx_cmd_remove_peer *cmd;
int ret;
@@ -1581,6 +1588,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
/* We never send a deauth, mac80211 is in charge of this */
cmd->reason_opcode = 0;
cmd->send_deauth_flag = 0;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 323d4a856e4b..b084830a61cf 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -88,7 +88,8 @@ int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid);
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
enum ieee80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
@@ -206,7 +207,7 @@ enum cmd_templ {
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
#define WL1271_CMD_TEMPL_MAX_SIZE 512
-#define WL1271_EVENT_TIMEOUT 1500
+#define WL1271_EVENT_TIMEOUT 5000
struct wl1271_cmd_header {
__le16 id;
@@ -594,6 +595,8 @@ struct wl12xx_cmd_add_peer {
u8 sp_len;
u8 wmm;
u8 session_id;
+ u8 role_id;
+ u8 padding[3];
} __packed;
struct wl12xx_cmd_remove_peer {
@@ -602,7 +605,7 @@ struct wl12xx_cmd_remove_peer {
u8 hlid;
u8 reason_opcode;
u8 send_deauth_flag;
- u8 padding1;
+ u8 role_id;
} __packed;
/*
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 8d3b34965db3..1f9a36031b06 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -67,7 +67,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
u8 hlid;
struct wl1271_link *lnk;
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
- WL12XX_MAX_LINKS) {
+ wl->num_links) {
lnk = &wl->links[hlid];
if (!lnk->ba_bitmap)
continue;
@@ -172,7 +172,7 @@ static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
const u8 *addr;
int h;
- for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ for_each_set_bit(h, &sta_bitmap, wl->num_links) {
bool found = false;
/* find the ap vif connected to this sta */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 51f8d634d32f..1555ff970050 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -106,6 +106,15 @@ wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
return 0;
}
+static inline void
+wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ BUG_ON(!wl->ops->convert_fw_status);
+
+ wl->ops->convert_fw_status(wl, raw_fw_status, fw_status);
+}
+
static inline u32
wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 7699f9d07e26..199e94120864 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -287,8 +287,8 @@ static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
if (ret < 0)
return ret;
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ /* disable beacon filtering until we get the first beacon */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
@@ -462,7 +462,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
+ if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_ENABLED_RATES;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 07e3d6a049ad..0305729d0986 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -60,7 +60,9 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
@@ -76,7 +78,9 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b46b3116cc55..7aae5b3a0c2c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -345,24 +345,24 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
* Start high-level PS if the STA is asleep with enough blocks in FW.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- else if (wl->active_link_count > 3 && fw_ps &&
+ else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- struct wl_fw_status_2 *status)
+ struct wl_fw_status *status)
{
u32 cur_fw_ps_map;
u8 hlid;
- cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+ cur_fw_ps_map = status->link_ps_bitmap;
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
wl1271_debug(DEBUG_PSM,
"link ps prev 0x%x cur 0x%x changed 0x%x",
@@ -372,77 +372,73 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
wl->links[hlid].allocated_pkts);
}
-static int wlcore_fw_status(struct wl1271 *wl,
- struct wl_fw_status_1 *status_1,
- struct wl_fw_status_2 *status_2)
+static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
int i;
- size_t status_len;
int ret;
struct wl1271_link *lnk;
- status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*status_2) + wl->fw_status_priv_len;
-
- ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
- status_len, false);
+ ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
+ wl->raw_fw_status,
+ wl->fw_status_len, false);
if (ret < 0)
return ret;
+ wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
+
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
- status_1->intr,
- status_1->fw_rx_counter,
- status_1->drv_rx_counter,
- status_1->tx_results_counter);
+ status->intr,
+ status->fw_rx_counter,
+ status->drv_rx_counter,
+ status->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
- (status_2->counters.tx_released_pkts[i] -
+ (status->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
- wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
+ wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
}
- for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wl->links_map, wl->num_links) {
u8 diff;
lnk = &wl->links[i];
/* prevent wrap-around in freed-packets counter */
- diff = (status_2->counters.tx_lnk_free_pkts[i] -
+ diff = (status->counters.tx_lnk_free_pkts[i] -
lnk->prev_freed_pkts) & 0xff;
if (diff == 0)
continue;
lnk->allocated_pkts -= diff;
- lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+ lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
/* accumulate the prev_freed_pkts counter */
lnk->total_freed_pkts += diff;
}
/* prevent wrap-around in total blocks counter */
- if (likely(wl->tx_blocks_freed <=
- le32_to_cpu(status_2->total_released_blks)))
- freed_blocks = le32_to_cpu(status_2->total_released_blks) -
+ if (likely(wl->tx_blocks_freed <= status->total_released_blks))
+ freed_blocks = status->total_released_blks -
wl->tx_blocks_freed;
else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
- le32_to_cpu(status_2->total_released_blks);
+ status->total_released_blks;
- wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
+ wl->tx_blocks_freed = status->total_released_blks;
wl->tx_allocated_blocks -= freed_blocks;
@@ -458,7 +454,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
cancel_delayed_work(&wl->tx_watchdog_work);
}
- avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
+ avail = status->tx_total - wl->tx_allocated_blocks;
/*
* The FW might change the total number of TX memblocks before
@@ -477,15 +473,15 @@ static int wlcore_fw_status(struct wl1271 *wl,
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
- wl12xx_irq_update_links_status(wl, wlvif, status_2);
+ wl12xx_irq_update_links_status(wl, wlvif, status);
}
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
- (s64)le32_to_cpu(status_2->fw_localtime);
+ (s64)(status->fw_localtime);
- wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+ wl->fw_fast_lnk_map = status->link_fast_bitmap;
return 0;
}
@@ -549,13 +545,13 @@ static int wlcore_irq_locked(struct wl1271 *wl)
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
wlcore_hw_tx_immediate_compl(wl);
- intr = le32_to_cpu(wl->fw_status_1->intr);
+ intr = wl->fw_status->intr;
intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
@@ -584,7 +580,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- ret = wlcore_rx(wl, wl->fw_status_1);
+ ret = wlcore_rx(wl, wl->fw_status);
if (ret < 0)
goto out;
@@ -786,10 +782,11 @@ out:
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
- WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
-
/* Avoid a recursive recovery */
if (wl->state == WLCORE_STATE_ON) {
+ WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
+ &wl->flags));
+
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
wl1271_ps_elp_wakeup(wl);
@@ -843,11 +840,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
- addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
+ addr = wl->fw_status->log_start_addr;
if (!addr)
goto out;
@@ -990,23 +987,23 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
static int wl1271_setup(struct wl1271 *wl)
{
- wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*wl->fw_status_2) +
- wl->fw_status_priv_len, GFP_KERNEL);
- if (!wl->fw_status_1)
- return -ENOMEM;
+ wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
+ if (!wl->raw_fw_status)
+ goto err;
- wl->fw_status_2 = (struct wl_fw_status_2 *)
- (((u8 *) wl->fw_status_1) +
- WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
+ wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
+ if (!wl->fw_status)
+ goto err;
wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
- if (!wl->tx_res_if) {
- kfree(wl->fw_status_1);
- return -ENOMEM;
- }
+ if (!wl->tx_res_if)
+ goto err;
return 0;
+err:
+ kfree(wl->fw_status);
+ kfree(wl->raw_fw_status);
+ return -ENOMEM;
}
static int wl12xx_set_power_on(struct wl1271 *wl)
@@ -1767,6 +1764,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
flush_work(&wl->tx_work);
flush_delayed_work(&wl->elp_work);
+ /*
+	 * Cancel the watchdog even if the tx_flush above failed. We will detect
+ * it on resume anyway.
+ */
+ cancel_delayed_work(&wl->tx_watchdog_work);
+
return 0;
}
@@ -1824,6 +1827,13 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
out:
wl->wow_enabled = false;
+
+ /*
+ * Set a flag to re-init the watchdog on the first Tx after resume.
+ * That way we avoid possible conditions where Tx-complete interrupts
+ * fail to arrive and we perform a spurious recovery.
+ */
+ set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
mutex_unlock(&wl->mutex);
return 0;
@@ -1914,6 +1924,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
memset(wl->session_ids, 0, sizeof(wl->session_ids));
+ memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
wl->active_sta_count = 0;
wl->active_link_count = 0;
@@ -1938,9 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl1271_debugfs_reset(wl);
- kfree(wl->fw_status_1);
- wl->fw_status_1 = NULL;
- wl->fw_status_2 = NULL;
+ kfree(wl->raw_fw_status);
+ wl->raw_fw_status = NULL;
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
kfree(wl->tx_res_if);
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
@@ -2571,10 +2583,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
ieee80211_scan_completed(wl->hw, true);
}
- if (wl->sched_vif == wlvif) {
- ieee80211_sched_scan_stopped(wl->hw);
+ if (wl->sched_vif == wlvif)
wl->sched_vif = NULL;
- }
if (wl->roc_vif == vif) {
wl->roc_vif = NULL;
@@ -2931,6 +2941,11 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
+
+ /* disable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
}
if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
@@ -3463,6 +3478,10 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
key_idx);
+ /* we don't handle unsetting of default key */
+ if (key_idx == -1)
+ return;
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
@@ -4298,6 +4317,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
}
+ if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ if (ret < 0)
+ goto out;
+ }
+
ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
@@ -4651,7 +4677,7 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
int ret;
- if (wl->active_sta_count >= AP_MAX_STATIONS) {
+ if (wl->active_sta_count >= wl->max_ap_stations) {
		wl1271_warning("could not allocate HLID - too many stations");
return -EBUSY;
}
@@ -4754,7 +4780,7 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
return -EINVAL;
- ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
+ ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
if (ret < 0)
return ret;
@@ -5679,28 +5705,6 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
}
-static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
- {
- .max = 3,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
-static struct ieee80211_iface_combination
-wlcore_iface_combinations[] = {
- {
- .max_interfaces = 3,
- .limits = wlcore_iface_limits,
- .n_limits = ARRAY_SIZE(wlcore_iface_limits),
- },
-};
-
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
int i;
@@ -5733,7 +5737,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_QUEUE_CONTROL;
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_CHANCTX_STA_CSA;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5821,10 +5826,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
- wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
- wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
- wl->hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(wlcore_iface_combinations);
+ wl->hw->wiphy->iface_combinations = wl->iface_combinations;
+ wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
SET_IEEE80211_DEV(wl->hw, wl->dev);
@@ -5844,8 +5847,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
-
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
@@ -5867,8 +5868,12 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
wl->hw = hw;
+ /*
+ * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
+	 * We don't allocate any additional resources here, so that's fine.
+ */
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < WL12XX_MAX_LINKS; j++)
+ for (j = 0; j < WLCORE_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
@@ -6011,7 +6016,8 @@ int wlcore_free_hw(struct wl1271 *wl)
kfree(wl->nvs);
wl->nvs = NULL;
- kfree(wl->fw_status_1);
+ kfree(wl->raw_fw_status);
+ kfree(wl->fw_status);
kfree(wl->tx_res_if);
destroy_workqueue(wl->freezable_wq);
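
Editorial note: the reworked wl1271_setup() above uses the usual kernel cleanup idiom: allocate several buffers in sequence and, on any failure, jump to a single error label that frees them all, which works because kfree(NULL) is a no-op. A small userspace sketch of the same shape (free(NULL) is likewise a no-op); the struct and sizes are made up:

#include <stdlib.h>

struct ctx {
	void *raw_status;
	void *status;
	void *tx_res;
};

static int ctx_setup(struct ctx *c, size_t raw_len)
{
	c->raw_status = calloc(1, raw_len);
	if (!c->raw_status)
		goto err;

	c->status = calloc(1, 128);
	if (!c->status)
		goto err;

	c->tx_res = calloc(1, 64);
	if (!c->tx_res)
		goto err;

	return 0;

err:
	/* Frees whatever was allocated so far; free(NULL) does nothing. */
	free(c->status);
	free(c->raw_status);
	c->status = c->raw_status = NULL;
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };
	int ret = ctx_setup(&c, 256);

	free(c.tx_res);
	free(c.status);
	free(c.raw_status);
	return ret ? 1 : 0;
}
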
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 26bfc365ba70..b52516eed7b2 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -280,7 +280,11 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- if (test_bit(hlid, &wl->ap_ps_map))
+ if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
+ return;
+
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
+ test_bit(hlid, &wl->ap_ps_map))
return;
wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 6791a1a6afba..e125974285cc 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -203,9 +203,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return is_data;
}
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
{
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
@@ -263,12 +263,12 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
wl->aggr_buf + pkt_offset,
pkt_len, rx_align,
&hlid) == 1) {
- if (hlid < WL12XX_MAX_LINKS)
+ if (hlid < wl->num_links)
__set_bit(hlid, active_hlids);
else
WARN(1,
- "hlid exceeded WL12XX_MAX_LINKS "
- "(%d)\n", hlid);
+ "hlid (%d) exceeded MAX_LINKS\n",
+ hlid);
}
wl->rx_counter++;
@@ -302,7 +302,7 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
{
int ret;
- if (wl->rx_filter_enabled[index] == enable) {
+ if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
wl1271_warning("Request to enable an already "
"enabled rx filter %d", index);
return 0;
@@ -316,7 +316,10 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
return ret;
}
- wl->rx_filter_enabled[index] = enable;
+ if (enable)
+ __set_bit(index, wl->rx_filter_enabled);
+ else
+ __clear_bit(index, wl->rx_filter_enabled);
return 0;
}
@@ -326,7 +329,7 @@ int wl1271_rx_filter_clear_all(struct wl1271 *wl)
int i, ret = 0;
for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
- if (!wl->rx_filter_enabled[i])
+ if (!test_bit(i, wl->rx_filter_enabled))
continue;
ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
if (ret)
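
Editorial note: the rx-filter change above swaps a bool array for a kernel bitmap, which shrinks the state to a few unsigned longs and lets the code use the standard bit helpers instead of indexed bool assignments. A kernel-style sketch of the same change; the struct name and filter count are illustrative:

#include <linux/types.h>
#include <linux/bitops.h>

#define MAX_FILTERS 5

struct rx_state {
	/* One bit per filter instead of one bool per filter. */
	DECLARE_BITMAP(filter_enabled, MAX_FILTERS);
};

static void rx_filter_set(struct rx_state *st, int index, bool enable)
{
	if (!!test_bit(index, st->filter_enabled) == enable)
		return;			/* already in the requested state */

	if (enable)
		__set_bit(index, st->filter_enabled);
	else
		__clear_bit(index, st->filter_enabled);
}

The !!test_bit() comparison mirrors the hunk above: test_bit() returns a non-zero word rather than exactly 1, so it is normalised before being compared against the bool argument.
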
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 3363f60fb7da..a3b1618db27c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -142,7 +142,7 @@ struct wl1271_rx_descriptor {
u8 reserved;
} __packed;
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 87cd707affa2..40b43115f835 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -101,7 +101,7 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
/*
* ROC for 1 second on the AP channel for completing the connection.
@@ -134,12 +134,12 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
* into high-level PS and clean out its TX queues.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- if (wl->active_link_count > 3 && fw_ps &&
+ if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
@@ -234,8 +234,13 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
- /* If the FW was empty before, arm the Tx watchdog */
- if (wl->tx_allocated_blocks == total_blocks)
+ /*
+ * If the FW was empty before, arm the Tx watchdog. Also do
+ * this on the first Tx after resume, as we always cancel the
+ * watchdog on suspend.
+ */
+ if (wl->tx_allocated_blocks == total_blocks ||
+ test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
wl12xx_rearm_tx_watchdog_locked(wl);
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -357,6 +362,10 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ieee80211_has_protected(frame_control))
tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
+ /* send EAPOL frames as voice */
+ if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
+ tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wlcore_hw_set_tx_desc_csum(wl, desc, skb);
@@ -560,11 +569,11 @@ static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
int i, h, start_hlid;
/* start from the link after the last one */
- start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < WL12XX_MAX_LINKS; i++) {
- h = (start_hlid + i) % WL12XX_MAX_LINKS;
+ for (i = 0; i < wl->num_links; i++) {
+ h = (start_hlid + i) % wl->num_links;
/* only consider connected stations */
if (!test_bit(h, wlvif->links_map))
@@ -688,8 +697,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
- WL12XX_MAX_LINKS;
+ wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
+ wl->num_links;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -722,7 +731,7 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
timeout = wl->conf.rx_streaming.duration;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
bool found = false;
- for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ for_each_set_bit(hlid, active_hlids, wl->num_links) {
if (test_bit(hlid, wlvif->links_map)) {
found = true;
break;
@@ -759,7 +768,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0, last_len = 0;
bool sent_packets = false;
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
int ret = 0;
int bus_ret = 0;
u8 hlid;
@@ -1061,7 +1070,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
int i;
/* TX failure */
- for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wlvif->links_map, wl->num_links) {
if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
/* this calls wl12xx_free_link */
@@ -1085,7 +1094,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
/* only reset the queues if something bad happened */
if (wl1271_tx_total_queue_count(wl) != 0) {
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
for (i = 0; i < NUM_TX_QUEUES; i++)
@@ -1178,7 +1187,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
WL1271_TX_FLUSH_TIMEOUT / 1000);
/* forcibly flush all Tx buffers on our queues */
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
out_wake:
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 35489c300da1..79cb3ff8b71f 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -37,6 +37,7 @@
#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
#define TX_HW_ATTR_HOST_ENCRYPT BIT(14)
+#define TX_HW_ATTR_EAPOL_FRAME BIT(15)
#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
#define TX_HW_ATTR_OFST_HEADER_PAD 1
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 06efc12a39e5..95a54504f0cc 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -73,6 +73,8 @@ struct wlcore_ops {
void (*tx_immediate_compl)(struct wl1271 *wl);
int (*hw_init)(struct wl1271 *wl);
int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+ void (*convert_fw_status)(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status);
u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
@@ -220,7 +222,7 @@ struct wl1271 {
int channel;
u8 system_hlid;
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long rate_policies_map[
@@ -228,7 +230,7 @@ struct wl1271 {
unsigned long klv_templates_map[
BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
- u8 session_ids[WL12XX_MAX_LINKS];
+ u8 session_ids[WLCORE_MAX_LINKS];
struct list_head wlvif_list;
@@ -346,8 +348,8 @@ struct wl1271 {
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl_fw_status_1 *fw_status_1;
- struct wl_fw_status_2 *fw_status_2;
+ void *raw_fw_status;
+ struct wl_fw_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
/* Current chipset configuration */
@@ -376,7 +378,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[WL12XX_MAX_LINKS];
+ struct wl1271_link links[WLCORE_MAX_LINKS];
/* number of currently active links */
int active_link_count;
@@ -405,6 +407,9 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+ /* Flag determining whether AP should broadcast OFDM-only rates */
+ bool ofdm_only_ap;
+
/* last wlvif we transmitted from */
struct wl12xx_vif *last_wlvif;
@@ -434,6 +439,10 @@ struct wl1271 {
u32 num_tx_desc;
/* number of RX descriptors the HW supports. */
u32 num_rx_desc;
+ /* number of links the HW supports */
+ u8 num_links;
+ /* max stations a single AP can support */
+ u8 max_ap_stations;
/* translate HW Tx rates to standard rate-indices */
const u8 **band_rate_to_idx;
@@ -448,10 +457,11 @@ struct wl1271 {
struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
/* size of the private FW status data */
+ size_t fw_status_len;
size_t fw_status_priv_len;
/* RX Data filter rule state - enabled/disabled */
- bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
+ unsigned long rx_filter_enabled[BITS_TO_LONGS(WL1271_MAX_RX_FILTERS)];
/* size of the private static data */
size_t static_data_priv_len;
@@ -476,8 +486,9 @@ struct wl1271 {
struct completion nvs_loading_complete;
- /* number of concurrent channels the HW supports */
- u32 num_channels;
+ /* interface combinations supported by the hw */
+ const struct ieee80211_iface_combination *iface_combinations;
+ u8 n_iface_combinations;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index ce7261ce8b59..756e890bc5ee 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -58,10 +58,15 @@
#define WL1271_DEFAULT_DTIM_PERIOD 1
#define WL12XX_MAX_ROLES 4
-#define WL12XX_MAX_LINKS 12
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+/*
+ * Maximum number of links allowed by any HW.
+ * This is NOT the actual number of links supported by the current HW.
+ */
+#define WLCORE_MAX_LINKS 16
+
/* the driver supports the 2.4Ghz and 5Ghz bands */
#define WLCORE_NUM_BANDS 2
@@ -118,72 +123,58 @@ struct wl1271_chip {
#define NUM_TX_QUEUES 4
-#define AP_MAX_STATIONS 8
-
-struct wl_fw_packet_counters {
- /* Cumulative counter of released packets per AC */
- u8 tx_released_pkts[NUM_TX_QUEUES];
-
- /* Cumulative counter of freed packets per HLID */
- u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
-
- /* Cumulative counter of released Voice memory blocks */
- u8 tx_voice_released_blks;
-
- /* Tx rate of the last transmitted packet */
- u8 tx_last_rate;
-
- u8 padding[2];
-} __packed;
-
-/* FW status registers */
-struct wl_fw_status_1 {
- __le32 intr;
+struct wl_fw_status {
+ u32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
- u8 reserved;
u8 tx_results_counter;
- __le32 rx_pkt_descs[0];
-} __packed;
-
-/*
- * Each HW arch has a different number of Rx descriptors.
- * The length of the status depends on it, since it holds an array
- * of descriptors.
- */
-#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
- (sizeof(struct wl_fw_status_1) + \
- (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
- num_rx_desc)
+ __le32 *rx_pkt_descs;
-struct wl_fw_status_2 {
- __le32 fw_localtime;
+ u32 fw_localtime;
/*
* A bitmap (where each bit represents a single HLID)
* to indicate if the station is in PS mode.
*/
- __le32 link_ps_bitmap;
+ u32 link_ps_bitmap;
/*
* A bitmap (where each bit represents a single HLID) to indicate
* if the station is in Fast mode
*/
- __le32 link_fast_bitmap;
+ u32 link_fast_bitmap;
/* Cumulative counter of total released mem blocks since FW-reset */
- __le32 total_released_blks;
+ u32 total_released_blks;
/* Size (in Memory Blocks) of TX pool */
- __le32 tx_total;
+ u32 tx_total;
+
+ struct {
+ /*
+ * Cumulative counter of released packets per AC
+ * (length of the array is NUM_TX_QUEUES)
+ */
+ u8 *tx_released_pkts;
- struct wl_fw_packet_counters counters;
+ /*
+ * Cumulative counter of freed packets per HLID
+ * (length of the array is wl->num_links)
+ */
+ u8 *tx_lnk_free_pkts;
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
- __le32 log_start_addr;
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+ } counters;
+
+ u32 log_start_addr;
/* Private status to be used by the lower drivers */
- u8 priv[0];
-} __packed;
+ void *priv;
+};
#define WL1271_MAX_CHANNELS 64
struct wl1271_scan {
@@ -240,6 +231,7 @@ enum wl12xx_flags {
WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
WL1271_FLAG_INTENDED_FW_RECOVERY,
WL1271_FLAG_IO_FAILED,
+ WL1271_FLAG_REINIT_TX_WDOG,
};
enum wl12xx_vif_flags {
@@ -368,7 +360,7 @@ struct wl12xx_vif {
/* HLIDs bitmap of associated stations */
unsigned long sta_hlid_map[BITS_TO_LONGS(
- WL12XX_MAX_LINKS)];
+ WLCORE_MAX_LINKS)];
/* recorded keys - set here before AP startup */
struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
@@ -385,7 +377,7 @@ struct wl12xx_vif {
/* counters of packets per AC, across all links in the vif */
int tx_queue_count[NUM_TX_QUEUES];
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 ssid_len;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f9daa9e183f2..2b62d799bfd8 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1281,16 +1281,10 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->rx_refill_timer.function = rx_refill_timeout;
err = -ENOMEM;
- np->stats = alloc_percpu(struct netfront_stats);
+ np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->stats == NULL)
goto exit;
- for_each_possible_cpu(i) {
- struct netfront_stats *xen_nf_stats;
- xen_nf_stats = per_cpu_ptr(np->stats, i);
- u64_stats_init(&xen_nf_stats->syncp);
- }
-
/* Initialise tx_skbs as a free chain containing every entry. */
np->tx_skb_freelist = 0;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d3dd41c840f1..1a54f1ffaadb 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
static int of_bus_pci_match(struct device_node *np)
{
/*
+ * "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
*/
- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
- !strcmp(np->type, "ht");
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
+ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}
static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ff85450d5683..10b51106c854 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -730,46 +730,64 @@ out:
}
EXPORT_SYMBOL(of_find_node_with_property);
-static
-const struct of_device_id *__of_match_node(const struct of_device_id *matches,
- const struct device_node *node)
+static const struct of_device_id *
+of_match_compatible(const struct of_device_id *matches,
+ const struct device_node *node)
{
const char *cp;
int cplen, l;
-
- if (!matches)
- return NULL;
+ const struct of_device_id *m;
cp = __of_get_property(node, "compatible", &cplen);
- do {
- const struct of_device_id *m = matches;
-
- /* Check against matches with current compatible string */
+ while (cp && (cplen > 0)) {
+ m = matches;
while (m->name[0] || m->type[0] || m->compatible[0]) {
- int match = 1;
- if (m->name[0])
- match &= node->name
- && !strcmp(m->name, node->name);
- if (m->type[0])
- match &= node->type
- && !strcmp(m->type, node->type);
- if (m->compatible[0])
- match &= cp
- && !of_compat_cmp(m->compatible, cp,
- strlen(m->compatible));
- if (match)
+ /* Only match entries that have neither type nor name set */
+ if (m->name[0] || m->type[0] ||
+ of_compat_cmp(m->compatible, cp,
+ strlen(m->compatible)))
+ m++;
+ else
return m;
- m++;
}
- /* Get node's next compatible string */
- if (cp) {
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
- }
- } while (cp && (cplen > 0));
+ /* Get node's next compatible string */
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ }
+
+ return NULL;
+}
+
+static
+const struct of_device_id *__of_match_node(const struct of_device_id *matches,
+ const struct device_node *node)
+{
+ const struct of_device_id *m;
+ if (!matches)
+ return NULL;
+
+ m = of_match_compatible(matches, node);
+ if (m)
+ return m;
+
+ while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
+ int match = 1;
+ if (matches->name[0])
+ match &= node->name
+ && !strcmp(matches->name, node->name);
+ if (matches->type[0])
+ match &= node->type
+ && !strcmp(matches->type, node->type);
+ if (matches->compatible[0])
+ match &= __of_device_is_compatible(node,
+ matches->compatible);
+ if (match)
+ return matches;
+ matches++;
+ }
return NULL;
}
@@ -778,10 +796,12 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches,
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
*
- * Low level utility function used by device matching. Matching order
- * is to compare each of the node's compatibles with all given matches
- * first. This implies node's compatible is sorted from specific to
- * generic while matches can be in any order.
+ * Low level utility function used by device matching. Matching is done
+ * in two passes:
+ * - First, look for the best compatible match by comparing each of the
+ *   node's compatible strings against all of the given match entries.
+ * - If that fails, fall back to full matching, where type and name are
+ *   checked and the compatible is tested with __of_device_is_compatible().
*/
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
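To make the new matching order concrete, here is a hypothetical table and caller (the compatible strings, the "serial" type entry and the wrapper function are invented; this is a sketch for a kernel-tree context, not a stand-alone program). A node whose compatible property lists "acme,foo-v2" followed by "acme,foo" now resolves to the "acme,foo-v2" entry before the type-based fallback entry is ever considered.

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical match table: compatible-only entries plus a type fallback. */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo-v2" },	/* most specific */
	{ .compatible = "acme,foo" },		/* more generic */
	{ .type = "serial" },			/* full-match fallback */
	{ /* sentinel */ }
};

static int foo_check_node(struct device_node *np)
{
	const struct of_device_id *id = of_match_node(foo_of_match, np);

	/* id points at the best compatible entry, or NULL if nothing fits. */
	return id ? 0 : -ENODEV;
}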
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 875b7b6f0d2a..5b3c24f3cde5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -24,7 +24,11 @@ MODULE_LICENSE("GPL");
static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- phydev->supported |= PHY_DEFAULT_FEATURES;
+ /* The default values for phydev->supported are provided by the PHY
+ * driver "features" member; we want to reset to sane defaults first
+ * before supporting higher speeds.
+ */
+ phydev->supported &= PHY_DEFAULT_FEATURES;
switch (max_speed) {
default:
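The &= above pulls phydev->supported back to the library defaults before any higher speeds are re-enabled, as the comment says. A stand-alone sketch of that reset-then-extend pattern, with invented feature bits standing in for the real PHY_*_FEATURES masks:

#include <stdint.h>
#include <stdio.h>

/* Invented feature bits, standing in for the PHY_*_FEATURES masks. */
#define FEAT_10_HALF	0x01
#define FEAT_10_FULL	0x02
#define FEAT_100_FULL	0x04
#define FEAT_1000_FULL	0x08
#define FEAT_DEFAULT	(FEAT_10_HALF | FEAT_10_FULL | FEAT_100_FULL)

int main(void)
{
	/* Driver advertised everything, including gigabit. */
	uint32_t supported = FEAT_DEFAULT | FEAT_1000_FULL;
	uint32_t extra_allowed = 0;	/* e.g. device tree caps speed at 100 */

	supported |= FEAT_DEFAULT;	/* old code: the gigabit bit survives */
	printf("with |=: %#x\n", supported);

	supported = FEAT_DEFAULT | FEAT_1000_FULL;
	supported &= FEAT_DEFAULT;	/* new code: drop to sane defaults */
	supported |= extra_allowed;	/* then add back what is permitted */
	printf("with &=: %#x\n", supported);
	return 0;
}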
@@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
{
struct phy_device *phy;
bool is_c45;
- int rc, prev_irq;
+ int rc;
u32 max_speed = 0;
is_c45 = of_device_is_compatible(child,
@@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
if (!phy || IS_ERR(phy))
return 1;
- if (mdio->irq) {
- prev_irq = mdio->irq[addr];
- mdio->irq[addr] =
- irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = prev_irq;
+ rc = irq_of_parse_and_map(child, 0);
+ if (rc > 0) {
+ phy->irq = rc;
+ if (mdio->irq)
+ mdio->irq[addr] = rc;
+ } else {
+ if (mdio->irq)
+ phy->irq = mdio->irq[addr];
}
/* Associate the OF node with the device structure so it
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index a208a457558c..84215c1929c4 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -12,28 +12,6 @@
#include <linux/export.h>
/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
- * into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
- */
-static const char *phy_modes[] = {
- [PHY_INTERFACE_MODE_NA] = "",
- [PHY_INTERFACE_MODE_MII] = "mii",
- [PHY_INTERFACE_MODE_GMII] = "gmii",
- [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- [PHY_INTERFACE_MODE_TBI] = "tbi",
- [PHY_INTERFACE_MODE_REVMII] = "rev-mii",
- [PHY_INTERFACE_MODE_RMII] = "rmii",
- [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- [PHY_INTERFACE_MODE_SMII] = "smii",
- [PHY_INTERFACE_MODE_XGMII] = "xgmii",
-};
-
-/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
*
@@ -49,8 +27,8 @@ int of_get_phy_mode(struct device_node *np)
if (err < 0)
return err;
- for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
- if (!strcasecmp(pm, phy_modes[i]))
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
return i;
return -ENODEV;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e2a783fdb98f..7c7a388c85ab 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -730,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
return (unsigned int)sta;
}
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
+
/**
* trim_stale_devices - remove PCI devices that are not responding.
* @dev: PCI device to start walking the hierarchy from.
@@ -745,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
|| acpiphp_no_hotplug(handle);
}
if (!alive) {
@@ -792,7 +803,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
mutex_lock(&slot->crit_sect);
if (slot_no_hotplug(slot)) {
; /* do nothing */
- } else if (get_slot_status(slot) == ACPI_STA_ALL) {
+ } else if (device_status_valid(get_slot_status(slot))) {
/* remove stale devices if any */
list_for_each_entry_safe_reverse(dev, tmp,
&bus->devices, bus_list)
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 645c867c1257..5f5b0f4be5be 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -162,6 +162,9 @@ int phy_init(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -187,6 +190,9 @@ int phy_exit(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -212,6 +218,9 @@ int phy_power_on(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -240,6 +249,9 @@ int phy_power_off(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
mutex_lock(&phy->mutex);
if (phy->power_count == 1 && phy->ops->power_off) {
ret = phy->ops->power_off(phy);
@@ -308,7 +320,7 @@ err0:
*/
void phy_put(struct phy *phy)
{
- if (IS_ERR(phy))
+ if (!phy || IS_ERR(phy))
return;
module_put(phy->ops->owner);
@@ -328,6 +340,9 @@ void devm_phy_put(struct device *dev, struct phy *phy)
{
int r;
+ if (!phy)
+ return;
+
r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
@@ -411,6 +426,27 @@ struct phy *phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(phy_get);
/**
+ * phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or the name of the controller
+ * port for non-dt case
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * NULL if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(phy_optional_get);
+
+/**
* devm_phy_get() - lookup and obtain a reference to a phy.
* @dev: device that requests this phy
* @string: the phy name as given in the dt data or phy device name
@@ -441,6 +477,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(devm_phy_get);
/**
+ * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or phy device name
+ * for non-dt case
+ *
+ * Gets the phy using phy_get(), and associates a device with it using
+ * devres. On driver detach, release function is invoked on the devres
+ * data, then, devres data is freed. This differs to devm_phy_get() in
+ * that if the phy does not exist, it is not considered an error and
+ * -ENODEV will not be returned. Instead the NULL phy is returned,
+ * which can be passed to all other phy consumer calls.
+ */
+struct phy *devm_phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = devm_phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_optional_get);
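A hypothetical consumer of the new helper (the device wiring, PHY name and probe fragment are invented). Because phy_init()/phy_power_on() in this same patch accept a NULL phy, an optional PHY needs no special-casing beyond the IS_ERR() check:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

/* Hypothetical probe fragment; "sata-phy" is an illustrative name. */
static int foo_probe_phy(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* real errors still propagate */

	/* NULL (PHY absent) is accepted by the consumer calls below. */
	ret = phy_init(phy);
	if (ret)
		return ret;

	return phy_power_on(phy);
}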
+
+/**
* phy_create() - create a new phy
* @dev: device that is creating the new phy
* @ops: function pointers for performing phy operations
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 563174891c90..041f9b638d28 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
/*
* Voltage is measured in units of 1.22mV. The voltage is stored as
- * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ * a 12-bit number plus sign, in the upper bits of a 16-bit register
*/
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 80edb7d8cb54..0b4cf9d63291 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev)
ret = PTR_ERR(isp->phy);
goto fail0;
}
- if (!isp->phy)
- goto fail0;
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d67f158..0fbac861080d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (chip->pdata->battery_online)
+ if (chip->pdata && chip->pdata->battery_online)
chip->online = chip->pdata->battery_online();
else
chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ if (!chip->pdata || !chip->pdata->charger_online
+ || !chip->pdata->charger_enable) {
chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
return;
}
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f340206d329..b14ebdad5dd2 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev)
/* Only LDO 5 and 6 has got the over current interrupt */
if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
irq = platform_get_irq_byname(pdev, "REGULATOR");
- irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
da9055_ldo5_6_oc_irq,
IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index b1078ba3f393..186df8785a91 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -168,10 +168,11 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
MAX14577_REG_MAX);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
}
- return 0;
+ of_node_put(np);
+
+ return ret;
}
static inline struct regulator_init_data *match_init_data(int index)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 9e80d61e5a3a..2eb97d7e8d12 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2595,8 +2595,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- INIT_LIST_HEAD(&cmd->cmd_list);
-
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 1d10eecad499..66e755cdde57 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@ struct qla_tgt_cmd {
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
- struct list_head cmd_list;
struct atio_from_isp atio;
};
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..62ec84b42e31 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
host_dev = scsi_get_device(shost);
if (host_dev && host_dev->dma_mask)
- bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+ bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
return bounce_limit;
}
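The cast above matters because dma_max_pfn() yields an unsigned long; on 32-bit the shift by PAGE_SHIFT is then performed in 32 bits and the high bits are lost before the assignment widens the result to u64. A small stand-alone illustration with fixed-width types and an arbitrary PFN corresponding to a 36-bit DMA mask:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend unsigned long is 32 bits wide, as on a 32-bit kernel. */
	uint32_t max_pfn = 0x00ffffff;	/* ~36-bit DMA mask with 4 KiB pages */
	unsigned int page_shift = 12;

	/* Shift happens in 32 bits, the top bits are truncated. */
	uint64_t wrong = (uint64_t)(max_pfn << page_shift);
	/* Widen first, then shift: the full value survives. */
	uint64_t right = (uint64_t)max_pfn << page_shift;

	printf("wrong: 0x%" PRIx64 "\nright: 0x%" PRIx64 "\n", wrong, right);
	return 0;
}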
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ba9310bc9acb..581ee2a8856b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -376,10 +376,10 @@ config SPI_PXA2XX_PCI
def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
- tristate "Renesas RSPI controller"
+ tristate "Renesas RSPI/QSPI controller"
depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
help
- SPI driver for Renesas RSPI blocks.
+ SPI driver for Renesas RSPI and QSPI blocks.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 50406306bc20..bae97ffec4b9 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -361,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
init_completion(&hw->done);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0f9036..d0b28bba38be 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -755,9 +755,7 @@ static void spi_pump_messages(struct kthread_work *work)
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
- "failed to transfer one message from queue: %d\n", ret);
- master->cur_msg->status = ret;
- spi_finalize_current_message(master);
+ "failed to transfer one message from queue\n");
return;
}
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -498,6 +506,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
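The strncpy_from_user() rework above guarantees that local_name is NUL-terminated even when the caller passes an over-long or unterminated name. A user-space analog of the same pattern (strncpy_from_user() itself is kernel-only; the length cap and the names here are invented):

#include <stdio.h>
#include <string.h>

#define NAME_LEN 16	/* stand-in for ASHMEM_NAME_LEN */

/* Copy at most NAME_LEN bytes from an untrusted source and guarantee the
 * local copy is NUL-terminated before it is used elsewhere. */
static void copy_name(char *dst, const char *untrusted)
{
	size_t len = strnlen(untrusted, NAME_LEN);

	memcpy(dst, untrusted, len);
	if (len == NAME_LEN)		/* source too long: truncate */
		dst[NAME_LEN - 1] = '\0';
	else
		dst[len] = '\0';
}

int main(void)
{
	char local[NAME_LEN];

	copy_name(local, "a-rather-long-client-name");
	printf("%s\n", local);	/* prints the truncated, terminated name */
	return 0;
}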
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index af6cd370b30f..ee3a7380e53b 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -35,9 +35,14 @@ struct compat_ion_custom_data {
compat_ulong_t arg;
};
+struct compat_ion_handle_data {
+ compat_int_t handle;
+};
+
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
+ struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data(
return err;
}
+static int compat_get_ion_handle_data(
+ struct compat_ion_handle_data __user *data32,
+ struct ion_handle_data __user *data)
+{
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
static int compat_put_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
case COMPAT_ION_IOC_FREE:
{
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
+ struct compat_ion_handle_data __user *data32;
+ struct ion_handle_data __user *data;
int err;
data32 = compat_ptr(arg);
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (data == NULL)
return -EFAULT;
- err = compat_get_ion_allocation_data(data32, data);
+ err = compat_get_ion_handle_data(data32, data);
if (err)
return err;
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 55b2002753f2..01cdc8aee898 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -17,9 +17,11 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
+#include <linux/io.h>
#include "ion.h"
#include "ion_priv.h"
@@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = {
};
struct ion_platform_data dummy_ion_pdata = {
- .nr = 4,
+ .nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void)
heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
GFP_KERNEL);
if (!heaps)
- return PTR_ERR(heaps);
+ return -ENOMEM;
/* Allocate a dummy carveout heap */
@@ -128,6 +130,7 @@ err:
}
return err;
}
+device_initcall(ion_dummy_init);
static void __exit ion_dummy_exit(void)
{
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void)
return;
}
-
-module_init(ion_dummy_init);
-module_exit(ion_dummy_exit);
-
+__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 296c74f98dc0..37e64d51394c 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
- sched_setscheduler(heap->task, SCHED_IDLE, &param);
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
return PTR_RET(heap->task);
}
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
return 0;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index d98673981cc4..fc2e4fccf69d 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,6 +17,7 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7f0729130d65..9849f3963e75 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
info->page = page;
info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
return info;
}
kfree(info);
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
struct list_head pages;
struct page_info *info, *tmp_info;
int i = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
if (align > PAGE_SIZE)
return -EINVAL;
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
info = alloc_largest_available(sys_heap, buffer, size_remaining,
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..5aaf71d6974b 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -35,10 +35,27 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* __KERNEL__ */
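The #else branch added above follows the usual pattern for optional kernel features: callers compile and link unchanged whether CONFIG_SW_SYNC is enabled or not, and the stubs simply report "no timeline". The same idiom in generic form, with an invented config symbol and type (a header sketch, not a complete translation unit):

#include <linux/kconfig.h>

/* Generic form of the idiom, with hypothetical names (CONFIG_FOO, foo_*). */
#if IS_ENABLED(CONFIG_FOO)
struct foo *foo_create(const char *name);
void foo_signal(struct foo *f);
#else
static inline struct foo *foo_create(const char *name)
{
	return NULL;	/* callers treat "no foo" as a soft failure */
}

static inline void foo_signal(struct foo *f)
{
}
#endif /* IS_ENABLED(CONFIG_FOO) */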
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b5ed9b..3d05f662110b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 8dfdd2732bdc..95a2358267ba 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev)
}
static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return ClassifyPacket(netdev_priv(dev), skb);
}
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 246080316c90..5b15033a94bf 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device,
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
- if (ret < 0)
- comedi_device_detach(dev);
mutex_unlock(&dev->mutex);
if (ret < 0) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 593676cf706a..d9ad2c0fdda2 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, chan, range, ofs;
chan = CR_CHAN(insn->chanspec);
@@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
ofs = PCI171x_DA1;
}
+ val = devpriv->ao_data[chan];
- for (n = 0; n < insn->n; n++)
- outw(data[n], dev->iobase + ofs);
+ for (n = 0; n < insn->n; n++) {
+ val = data[n];
+ outw(val, dev->iobase + ofs);
+ }
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, rangereg, chan;
chan = CR_CHAN(insn->chanspec);
@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
outb(rangereg, dev->iobase + PCI1720_RANGE);
devpriv->da_ranges = rangereg;
}
+ val = devpriv->ao_data[chan];
for (n = 0; n < insn->n; n++) {
- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
+ val = data[n];
+ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
}
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
}
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3beeb1254152..88c60b6020c4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -48,6 +48,7 @@
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
+#include <asm/unaligned.h>
#include "comedi_fc.h"
#include "../comedidev.h"
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
+ val = be32_to_cpu(get_unaligned((uint32_t
+ *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return ret;
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
+ val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
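The get_unaligned() change above avoids dereferencing a u32 pointer at an odd offset into the URB buffer. A user-space sketch of the byte-wise view of the same conversion, with a made-up buffer: assemble the 32-bit big-endian word, then strip the status byte and flip the sign bit exactly as the driver does.

#include <stdint.h>
#include <stdio.h>

/* Assemble a big-endian 32-bit value byte by byte (no unaligned load),
 * then apply the driver's masking steps. Buffer contents are invented. */
static uint32_t sample_from_buf(const uint8_t *buf)
{
	uint32_t val = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
		       ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];

	val &= 0x00ffffff;	/* strip status byte */
	val ^= 0x00800000;	/* convert to unsigned */
	return val;
}

int main(void)
{
	const uint8_t insn_buf[] = { 0x00, 0xa5, 0x7f, 0xff, 0xff };

	/* +1 mirrors devpriv->insn_buf + 1 in the driver. */
	printf("0x%06x\n", sample_from_buf(insn_buf + 1));
	return 0;
}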
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 1f61b89eca44..33ac7fb88cbd 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,177 +2232,6 @@ done:
return rtn;
}
-/*
- * Common Packet Handling code
- */
-
-static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
- long dlen, long plen, int n1, u8 *dbuf)
-{
- char *error;
- long n;
- long remain;
- u8 *buf;
- u8 *b;
-
- remain = nd->nd_remain;
- nd->nd_tx_work = 1;
-
- /*
- * Otherwise data should appear only when we are
- * in the CS_READY state.
- */
-
- if (ch->ch_state < CS_READY) {
- error = "Data received before RWIN established";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * Assure that the data received is within the
- * allowable window.
- */
-
- n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
-
- if (dlen > n) {
- error = "Receive data overrun";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * If we received 3 or less characters,
- * assume it is a human typing, and set RTIME
- * to 10 milliseconds.
- *
- * If we receive 10 or more characters,
- * assume its not a human typing, and set RTIME
- * to 100 milliseconds.
- */
-
- if (ch->ch_edelay != DGRP_RTIME) {
- if (ch->ch_rtime != ch->ch_edelay) {
- ch->ch_rtime = ch->ch_edelay;
- ch->ch_flag |= CH_PARAM;
- }
- } else if (dlen <= 3) {
- if (ch->ch_rtime != 10) {
- ch->ch_rtime = 10;
- ch->ch_flag |= CH_PARAM;
- }
- } else {
- if (ch->ch_rtime != DGRP_RTIME) {
- ch->ch_rtime = DGRP_RTIME;
- ch->ch_flag |= CH_PARAM;
- }
- }
-
- /*
- * If a portion of the packet is outside the
- * buffer, shorten the effective length of the
- * data packet to be the amount of data received.
- */
-
- if (remain < plen)
- dlen -= plen - remain;
-
- /*
- * Detect if receive flush is now complete.
- */
-
- if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
- ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
- ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
- ch->ch_flag &= ~CH_RX_FLUSH;
- }
-
- /*
- * If we are ready to receive, move the data into
- * the receive buffer.
- */
-
- ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
-
- if (ch->ch_state == CS_READY &&
- (ch->ch_tun.un_open_count != 0) &&
- (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
- (ch->ch_cflag & CF_CREAD) != 0 &&
- (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
- (ch->ch_send & RR_RX_FLUSH) == 0) {
-
- if (ch->ch_rin + dlen >= RBUF_MAX) {
- n = RBUF_MAX - ch->ch_rin;
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
-
- ch->ch_rin = 0;
- dbuf += n;
- dlen -= n;
- }
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
-
- ch->ch_rin += dlen;
-
-
- /*
- * If we are not in fastcook mode, or
- * if there is a fastcook thread
- * waiting for data, send the data to
- * the line discipline.
- */
-
- if ((ch->ch_flag & CH_FAST_READ) == 0 ||
- ch->ch_inwait != 0) {
- dgrp_input(ch);
- }
-
- /*
- * If there is a read thread waiting
- * in select, and we are in fastcook
- * mode, wake him up.
- */
-
- if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
- (ch->ch_flag & CH_FAST_READ) != 0)
- wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
-
- /*
- * Wake any thread waiting in the
- * fastcook loop.
- */
-
- if ((ch->ch_flag & CH_INPUT) != 0) {
- ch->ch_flag &= ~CH_INPUT;
- wake_up_interruptible(&ch->ch_flag_wait);
- }
- }
-
- /*
- * Fabricate and insert a data packet header to
- * preced the remaining data when it comes in.
- */
-
- if (remain < plen) {
- dlen = plen - remain;
- b = buf;
-
- b[0] = 0x90 + n1;
- put_unaligned_be16(dlen, b + 1);
-
- remain = 3;
- if (remain > 0 && b != buf)
- memcpy(buf, b, remain);
-
- nd->nd_remain = remain;
- return;
- }
-}
-
/**
* dgrp_receive() -- decode data packets received from the remote PortServer.
* @nd: pointer to a node structure
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 1;
dbuf = b + 1;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 2-byte header data packet.
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 2;
dbuf = b + 2;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 3-byte header data packet.
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd)
dbuf = b + 3;
+ /*
+ * Common packet handling code.
+ */
+
+data:
+ nd->nd_tx_work = 1;
+
+ /*
+ * Otherwise data should appear only when we are
+ * in the CS_READY state.
+ */
+
+ if (ch->ch_state < CS_READY) {
+ error = "Data received before RWIN established";
+ goto prot_error;
+ }
+
+ /*
+ * Assure that the data received is within the
+ * allowable window.
+ */
+
+ n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
+
+ if (dlen > n) {
+ error = "Receive data overrun";
+ goto prot_error;
+ }
+
+ /*
+ * If we received 3 or fewer characters,
+ * assume it is a human typing, and set RTIME
+ * to 10 milliseconds.
+ *
+ * If we receive 10 or more characters,
+ * assume it's not a human typing, and set RTIME
+ * to 100 milliseconds.
+ */
+
+ if (ch->ch_edelay != DGRP_RTIME) {
+ if (ch->ch_rtime != ch->ch_edelay) {
+ ch->ch_rtime = ch->ch_edelay;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else if (dlen <= 3) {
+ if (ch->ch_rtime != 10) {
+ ch->ch_rtime = 10;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else {
+ if (ch->ch_rtime != DGRP_RTIME) {
+ ch->ch_rtime = DGRP_RTIME;
+ ch->ch_flag |= CH_PARAM;
+ }
+ }
+
+ /*
+ * If a portion of the packet is outside the
+ * buffer, shorten the effective length of the
+ * data packet to be the amount of data received.
+ */
+
+ if (remain < plen)
+ dlen -= plen - remain;
+
+ /*
+ * Detect if receive flush is now complete.
+ */
+
+ if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
+ ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
+ ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
+ ch->ch_flag &= ~CH_RX_FLUSH;
+ }
+
+ /*
+ * If we are ready to receive, move the data into
+ * the receive buffer.
+ */
+
+ ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
+
+ if (ch->ch_state == CS_READY &&
+ (ch->ch_tun.un_open_count != 0) &&
+ (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
+ (ch->ch_cflag & CF_CREAD) != 0 &&
+ (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
+ (ch->ch_send & RR_RX_FLUSH) == 0) {
+
+ if (ch->ch_rin + dlen >= RBUF_MAX) {
+ n = RBUF_MAX - ch->ch_rin;
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
+
+ ch->ch_rin = 0;
+ dbuf += n;
+ dlen -= n;
+ }
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
+
+ ch->ch_rin += dlen;
+
+
+ /*
+ * If we are not in fastcook mode, or
+ * if there is a fastcook thread
+ * waiting for data, send the data to
+ * the line discipline.
+ */
+
+ if ((ch->ch_flag & CH_FAST_READ) == 0 ||
+ ch->ch_inwait != 0) {
+ dgrp_input(ch);
+ }
+
+ /*
+ * If there is a read thread waiting
+ * in select, and we are in fastcook
+ * mode, wake him up.
+ */
+
+ if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
+ (ch->ch_flag & CH_FAST_READ) != 0)
+ wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
+
+ /*
+ * Wake any thread waiting in the
+ * fastcook loop.
+ */
+
+ if ((ch->ch_flag & CH_INPUT) != 0) {
+ ch->ch_flag &= ~CH_INPUT;
+
+ wake_up_interruptible(&ch->ch_flag_wait);
+ }
+ }
+
+ /*
+ * Fabricate and insert a data packet header to
+ * precede the remaining data when it comes in.
+ */
+
+ if (remain < plen) {
+ dlen = plen - remain;
+ b = buf;
+
+ b[0] = 0x90 + n1;
+ put_unaligned_be16(dlen, b + 1);
+
+ remain = 3;
+ goto done;
+ }
break;
/*
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index f8788bf0a7d3..cdeffe75496b 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf,
#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
ret = register_wimax_device(phy_dev, &intf->dev);
+ if (ret)
+ release_usb(udev);
out:
if (ret) {
kfree(phy_dev);
kfree(udev);
+ usb_put_dev(usbdev);
} else {
usb_set_intfdata(intf, phy_dev);
}
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d60faf6..c9fedb79e3a2 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@ struct iio_channel_info {
uint64_t mask;
unsigned be;
unsigned is_signed;
- unsigned enabled;
unsigned location;
};
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir,
while (ent = readdir(dp), ent != NULL) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
+ int current_enabled = 0;
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir,
ret = -errno;
goto error_cleanup_array;
}
- fscanf(sysfsfp, "%u", &current->enabled);
+ fscanf(sysfsfp, "%u", &current_enabled);
fclose(sysfsfp);
- if (!current->enabled) {
+ if (!current_enabled) {
free(filename);
count--;
continue;
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5ea36410f716..5708ffc62aec 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = {
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
- .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ .shift = 12 - (_realbits), \
+ .endianness = IIO_BE, \
+ }, \
.event_spec = _ev_spec, \
.num_event_specs = _num_ev_spec, \
}
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(client->irq, indio_dev);
+ if (client->irq > 0)
+ free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index df71669bb60e..7fc66a6a6e36 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1035,8 +1035,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4);
SHOW_SCALE_AVAILABLE_ATTR(5);
SHOW_SCALE_AVAILABLE_ATTR(6);
SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(8);
-SHOW_SCALE_AVAILABLE_ATTR(9);
SHOW_SCALE_AVAILABLE_ATTR(10);
SHOW_SCALE_AVAILABLE_ATTR(11);
SHOW_SCALE_AVAILABLE_ATTR(12);
@@ -1053,8 +1051,6 @@ static struct attribute *mxs_lradc_attributes[] = {
&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
@@ -1613,7 +1609,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
* of the array.
*/
scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
- (iio->channels[i].scan_type.realbits - s);
+ (LRADC_RESOLUTION - s);
lradc->scale_avail[i][s].nano =
do_div(scale_uv, 100000000) * 10;
lradc->scale_avail[i][s].integer = scale_uv;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b744e6..2b96665da8a2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate(indio_dev);
- if (buffer)
+ if (!buffer)
return -ENOMEM;
iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 09ef5fb8bae6..236ed66f116a 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
imx_drm_device_put();
- drm_vblank_cleanup(imxdrm->drm);
- drm_kms_helper_poll_fini(imxdrm->drm);
- drm_mode_config_cleanup(imxdrm->drm);
+ drm_vblank_cleanup(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
return 0;
}
@@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
- return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
@@ -370,29 +370,6 @@ static void imx_drm_connector_unregister(
}
/*
- * register a crtc to the drm core
- */
-static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- int ret;
-
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- return ret;
-
- drm_crtc_helper_add(imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
-
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
-
-/*
* Called by the CRTC driver when all CRTCs are registered. This
* puts all the pieces together and initializes the driver.
* Once this is called no more CRTCs can be registered since
@@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
mutex_lock(&imxdrm->mutex);
- drm_kms_helper_poll_init(imxdrm->drm);
+ drm_kms_helper_poll_init(drm);
/* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(imxdrm->drm,
- &imxdrm->drm->primary->mode_group);
+ ret = drm_mode_group_init_legacy_group(drm,
+ &drm->primary->mode_group);
if (ret)
goto err_kms;
- ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
+ ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
goto err_kms;
@@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- imxdrm->drm->vblank_disable_allowed = true;
+ drm->vblank_disable_allowed = true;
if (!imx_drm_device_get()) {
ret = -EINVAL;
@@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = imx_drm_crtc_register(imx_drm_crtc);
+ ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
goto err_register;
+ drm_crtc_helper_add(crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+
+ drm_crtc_init(imxdrm->drm, crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
+ drm_mode_group_reinit(imxdrm->drm);
+
imx_drm_update_possible_crtcs();
mutex_unlock(&imxdrm->mutex);
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index f3a1f5e2e492..62ce0e86f14b 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/hdmi.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -52,11 +53,6 @@ enum hdmi_datamap {
YCbCr422_12B = 0x12,
};
-enum hdmi_colorimetry {
- ITU601,
- ITU709,
-};
-
enum imx_hdmi_devtype {
IMX6Q_HDMI,
IMX6DL_HDMI,
@@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
if (is_color_space_conversion(hdmi)) {
if (hdmi->hdmi_data.enc_out_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_out_eitu601;
else
csc_coeff = &csc_coeff_rgb_out_eitu709;
} else if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_in_eitu601;
else
csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi)
/* Set up colorimetry */
if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
} else if (hdmi->hdmi_data.enc_out_format != RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
} else { /* Carries no data */
@@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
(hdmi->vic == 21) || (hdmi->vic == 22) ||
(hdmi->vic == 2) || (hdmi->vic == 3) ||
(hdmi->vic == 17) || (hdmi->vic == 18))
- hdmi->hdmi_data.colorimetry = ITU601;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
else
- hdmi->hdmi_data.colorimetry = ITU709;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
(hdmi->vic == 12) || (hdmi->vic == 13) ||
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6d62a8..0a2b6cb3775e 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
* Other minor misc cleanups...
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
-hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
+<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
+Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
+would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15fc8996..037ae8a6d531 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@ struct kuc_hdr {
__u16 kuc_msglen; /* Including header */
} __attribute__((aligned(sizeof(__u64))));
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+
#define KUC_MAGIC 0x191C /*Lustre9etLinC */
#define KUC_FL_BLOCK 0x01 /* Wait for send */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942ced01a..dddccca120c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@ do { \
do { \
LASSERT(!in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & GFP_ATOMIC)) != 0); \
+ ((mask) & __GFP_WAIT) == 0)); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 93648632ba26..6f58ead20393 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (is_vmalloc_addr(vaddr)) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52ec998..b7b53b579c85 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- int mpflag = 0;
+ int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
- if (lntmsg->msg_vmflush)
+ if (!mpflag)
cfs_memory_pressure_restore(mpflag);
+
if (rc == 0)
return (0);
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6b6c0240e824..7893d83e131f 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+ sizeof(struct changelog_ext_rec))
struct changelog_rec {
__u16 cr_namelen;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 22d0acc95bc5..52b7731bcc38 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
break;
case Q_GETQUOTA:
if (((type == USRQUOTA &&
- uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
(!cfs_capable(CFS_CAP_SYS_ADMIN) ||
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index d1ad91c34ddc..83013927e131 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
{
struct kuc_hdr *lh = (struct kuc_hdr *)buf;
- LASSERT(len <= CR_MAXSIZE);
+ LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
lh->kuc_magic = KUC_MAGIC;
lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata)
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
- OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
@@ -1540,7 +1540,7 @@ out:
if (ctxt)
llog_ctxt_put(ctxt);
if (cs->cs_buf)
- OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
OBD_FREE_PTR(cs);
return rc;
}
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index eedffed17e39..31b269a5fff7 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
}
static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return (u16)smp_processor_id();
}
@@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
priv->mii_bus->write = xlr_mii_write;
priv->mii_bus->parent = &pdev->dev;
priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (priv->mii_bus->irq == NULL) {
+ pr_err("irq alloc failed\n");
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
/* Scan only the enabled address */
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 47e0a91238a1..5a001d9b4252 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags {
*/
#define MAX_TRANSFER_PACKETS ((1<<10)-1)
-enum {
- USB_CLOCK_TYPE_REF_12,
- USB_CLOCK_TYPE_REF_24,
- USB_CLOCK_TYPE_REF_48,
- USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
/**
* Logical transactions may take numerous low level
* transactions, especially when splits are concerned. This
@@ -471,19 +464,6 @@ struct octeon_hcd {
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
-static int octeon_usb_get_clock_type(void)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_BBGW_REF:
- case CVMX_BOARD_TYPE_LANAI2_A:
- case CVMX_BOARD_TYPE_LANAI2_U:
- case CVMX_BOARD_TYPE_LANAI2_G:
- case CVMX_BOARD_TYPE_UBNT_E100:
- return USB_CLOCK_TYPE_CRYSTAL_12;
- }
- return USB_CLOCK_TYPE_REF_48;
-}
-
/**
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
return 0; /* Data0 */
}
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * struct cvmx_usb_state.
- *
- * Returns: Number of port, zero if usb isn't supported
- */
-static int cvmx_usb_get_num_ports(void)
-{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
-}
-
/**
* Initialize a USB port for use. This must be called before any
* other access to the Octeon USB port is made. The port starts
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void)
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
- int usb_port_number)
+ int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
- enum cvmx_usb_initialize_flags flags = 0;
int i;
/* At first allow 0-1 for the usb port number */
if ((usb_port_number < 0) || (usb_port_number > 1))
return -EINVAL;
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- return -EINVAL;
- /* Try to determine clock type automatically */
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
- /* Only 12 MHZ crystals are supported */
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- } else {
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- return -EINVAL;
- break;
- }
- }
memset(usb, 0, sizeof(*usb));
usb->init_flags = flags;
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return 0;
}
-
static const struct hc_driver octeon_hc_driver = {
.description = "Octeon USB",
.product_desc = "Octeon Host Controller",
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = {
.hub_control = octeon_usb_hub_control,
};
-
-static int octeon_usb_driver_probe(struct device *dev)
+static int octeon_usb_probe(struct platform_device *pdev)
{
int status;
- int usb_num = to_platform_device(dev)->id;
- int irq = platform_get_irq(to_platform_device(dev), 0);
+ int initialize_flags;
+ int usb_num;
+ struct resource *res_mem;
+ struct device_node *usbn_node;
+ int irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
struct octeon_hcd *priv;
struct usb_hcd *hcd;
unsigned long flags;
+ u32 clock_rate = 48000000;
+ bool is_crystal_clock = false;
+ const char *clock_type;
+ int i;
+
+ if (dev->of_node == NULL) {
+ dev_err(dev, "Error: empty of_node\n");
+ return -ENXIO;
+ }
+ usbn_node = dev->of_node->parent;
+
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No USBN \"refclk-frequency\"\n");
+ return -ENXIO;
+ }
+ switch (clock_rate) {
+ case 12000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case 24000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case 48000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate);
+ return -ENXIO;
+
+ }
+
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+
+ if (is_crystal_clock)
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ else
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(dev, "found no memory resource\n");
+ return -ENXIO;
+ }
+ usb_num = (res_mem->start >> 44) & 1;
+
+ if (irq < 0) {
+ /* Defective device tree, but we know how to fix it. */
+ irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+ irq = irq_create_mapping(NULL, hwirq);
+ }
/*
* Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev)
dev->coherent_dma_mask = ~0;
dev->dma_mask = &dev->coherent_dma_mask;
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_dbg(dev, "Failed to allocate memory for HCD\n");
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev)
tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
- status = cvmx_usb_initialize(&priv->usb, usb_num);
+ status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
@@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev)
cvmx_usb_poll(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
- status = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ status = usb_add_hcd(hcd, irq, 0);
if (status) {
dev_dbg(dev, "USB add HCD failed with %d\n", status);
kfree(hcd);
@@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev)
}
device_wakeup_enable(hcd->self.controller);
- dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+ dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
return 0;
}
-static int octeon_usb_driver_remove(struct device *dev)
+static int octeon_usb_remove(struct platform_device *pdev)
{
int status;
+ struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev)
return 0;
}
-static struct device_driver octeon_usb_driver = {
- .name = "OcteonUSB",
- .bus = &platform_bus_type,
- .probe = octeon_usb_driver_probe,
- .remove = octeon_usb_driver_remove,
+static struct of_device_id octeon_usb_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-usbc",
+ },
+ {},
};
+static struct platform_driver octeon_usb_driver = {
+ .driver = {
+ .name = "OcteonUSB",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_usb_match,
+ },
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
+};
-#define MAX_USB_PORTS 10
-static struct platform_device *pdev_glob[MAX_USB_PORTS];
-static int octeon_usb_registered;
-static int __init octeon_usb_module_init(void)
+static int __init octeon_usb_driver_init(void)
{
- int num_devices = cvmx_usb_get_num_ports();
- int device;
-
- if (usb_disabled() || num_devices == 0)
- return -ENODEV;
-
- if (driver_register(&octeon_usb_driver))
- return -ENOMEM;
-
- octeon_usb_registered = 1;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- for (device = 0; device < num_devices; device++) {
- struct resource irq_resource;
- struct platform_device *pdev;
- memset(&irq_resource, 0, sizeof(irq_resource));
- irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
- irq_resource.end = irq_resource.start;
- irq_resource.flags = IORESOURCE_IRQ;
- pdev = platform_device_register_simple((char *)octeon_usb_driver.name, device, &irq_resource, 1);
- if (IS_ERR(pdev)) {
- driver_unregister(&octeon_usb_driver);
- octeon_usb_registered = 0;
- return PTR_ERR(pdev);
- }
- if (device < MAX_USB_PORTS)
- pdev_glob[device] = pdev;
+ if (usb_disabled())
+ return 0;
- }
- return 0;
+ return platform_driver_register(&octeon_usb_driver);
}
+module_init(octeon_usb_driver_init);
-static void __exit octeon_usb_module_cleanup(void)
+static void __exit octeon_usb_driver_exit(void)
{
- int i;
+ if (usb_disabled())
+ return;
- for (i = 0; i < MAX_USB_PORTS; i++)
- if (pdev_glob[i]) {
- platform_device_unregister(pdev_glob[i]);
- pdev_glob[i] = NULL;
- }
- if (octeon_usb_registered)
- driver_unregister(&octeon_usb_driver);
+ platform_driver_unregister(&octeon_usb_driver);
}
+module_exit(octeon_usb_driver_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
-module_init(octeon_usb_module_init);
-module_exit(octeon_usb_module_cleanup);
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index cb060364dfe7..5d965cf06d59 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev)
if (binding) {
binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
binding->ptype.func = oz_pkt_recv;
- memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
+ memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
oz_dbg(ON, "Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev)
}
} else {
oz_dbg(ON, "Binding to all netcards\n");
+ memset(binding->name, 0, OZ_MAX_BINDING_LEN);
binding->ptype.dev = NULL;
}
if (binding) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..96df62f95b6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
- DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec992569476..4ad80ae1067f 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
poidparam->subcode, poidparam->len, len));
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
ret = -EINVAL;
goto _rtw_mp_ioctl_hdl_exit;
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[17 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+ u8 go_devadd_str[17 + 12] = {};
/* Commented by Albert 20121209 */
/* The input data is the GO's interface address which the application wants to know its device address. */
@@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
- sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
else
- sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
- if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
return -EFAULT;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 68f98fa114d2..7c9ee58f47bb 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 0a341d6ec51f..a70dcef1419e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
/*=== Customer ID ===*/
/****** 8188EUS ********/
- {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
index 2aa5dac2f1df..abccc9dabd65 100644
--- a/drivers/staging/rtl8821ae/Kconfig
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -1,6 +1,6 @@
config R8821AE
tristate "RealTek RTL8821AE Wireless LAN NIC driver"
- depends on PCI && WLAN
+ depends on PCI && WLAN && MAC80211
depends on m
select WIRELESS_EXT
select WEXT_PRIV
diff --git a/drivers/staging/rtl8821ae/rc.c b/drivers/staging/rtl8821ae/rc.c
index d387f13ea7dc..0cc32c60ddee 100644
--- a/drivers/staging/rtl8821ae/rc.c
+++ b/drivers/staging/rtl8821ae/rc.c
@@ -286,7 +286,6 @@ static void rtl_rate_free_sta(void *rtlpriv,
}
static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
index cfe88a1efd55..76bef93ad70a 100644
--- a/drivers/staging/rtl8821ae/wifi.h
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -1414,7 +1414,7 @@ struct rtl_dm {
/*88e tx power tracking*/
- u8 bb_swing_idx_ofdm[2];
+ u8 bb_swing_idx_ofdm[MAX_RF_PATH];
u8 bb_swing_idx_ofdm_current;
u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
bool bb_swing_flag_Ofdm;
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b771e0..81ff8522405c 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size)
struct pool *p;
p = calloc(1, sizeof(struct pool));
- if (!p) {
- free(p);
+ if (!p)
return NULL;
- }
p->mem = calloc(1, size);
- if (!p->mem)
+ if (!p->mem) {
+ free(p);
return NULL;
+ }
p->next = pool_head;
pool_head = p;
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586d11d9..0141bc34d5cc 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_WIRELESS:
break;
default:
- pr_err("speed %d\n", speed);
+ pr_err("Failed attach request for unsupported USB speed: %s\n",
+ usb_speed_string(speed));
return -EINVAL;
}
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4a1ddaf5e00f..187fc060de26 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
goto out;
}
- if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) {
+ if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d6439f4a..cda4d80cfaef 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack(
u32 last_statsn;
int found_cmd;
- if (conn->exp_statsn > begrun) {
+ if (!begrun) {
+ begrun = conn->exp_statsn;
+ } else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 12da9b386169..c3d9df6aaf5f 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -500,7 +500,7 @@ static inline int core_alua_state_lba_dependent(
if (segment_mult) {
u64 tmp = lba;
- start_lba = sector_div(tmp, segment_size * segment_mult);
+ start_lba = do_div(tmp, segment_size * segment_mult);
last_lba = first_lba + segment_size - 1;
if (start_lba >= first_lba &&
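
The switch from sector_div() to do_div() above leans on do_div()'s contract: the 64-bit dividend is updated in place to the quotient and the macro evaluates to the 32-bit remainder. A small sketch under those assumptions (the helper name is illustrative, not part of the patch):

#include <linux/types.h>
#include <asm/div64.h>

/* Return the offset of @lba within its segment; after do_div(),
 * @tmp holds the segment index (lba / seg_len). */
static u32 lba_offset_in_segment(u64 lba, u32 seg_len)
{
        u64 tmp = lba;
        u32 rem = do_div(tmp, seg_len);

        return rem;
}
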
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2f5d77932c80..3013287a2aaa 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
- int pr_holder = 0;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
+ type = pr_reg->pr_res_type;
spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Release the calling I_T Nexus registration now..
*/
__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+ pr_reg = NULL;
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* RESERVATIONS RELEASED.
*/
if (pr_holder &&
- (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
- pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
list_for_each_entry(pr_reg_p,
&pr_tmpl->registration_list,
pr_reg_list) {
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
out:
- core_scsi3_put_pr_reg(pr_reg);
+ if (pr_reg)
+ core_scsi3_put_pr_reg(pr_reg);
return ret;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index fa3cae393e13..a4489444ffbc 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1074,12 +1074,19 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
struct scatterlist *psg;
void *paddr, *addr;
unsigned int i, len, left;
+ unsigned int offset = 0;
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
len = min(psg->length, left);
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ sg_off = sg->offset;
+ }
+
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
addr = kmap_atomic(sg_page(sg)) + sg_off;
@@ -1089,6 +1096,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
memcpy(addr, paddr, len);
left -= len;
+ offset += len;
kunmap_atomic(paddr);
kunmap_atomic(addr);
}
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 43c5ca9878bc..3bebc71ea033 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -440,8 +440,8 @@ check_scsi_name:
padding = ((-scsi_target_len) & 3);
if (padding)
scsi_target_len += padding;
- if (scsi_name_len > 256)
- scsi_name_len = 256;
+ if (scsi_target_len > 256)
+ scsi_target_len = 256;
buf[off-1] = scsi_target_len;
off += scsi_target_len;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c50fd9f11aab..24b4f65d8777 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -669,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
return;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
-
/*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
@@ -681,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp);
return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
+ } else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872e2e47..b01659bd4f7c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void)
/* Register as a vio device to receive callbacks */
return platform_driver_register(&hvc_opal_driver);
}
-module_init(hvc_opal_init);
-
-static void __exit hvc_opal_exit(void)
-{
- platform_driver_unregister(&hvc_opal_driver);
-}
-module_exit(hvc_opal_exit);
+device_initcall(hvc_opal_init);
static void udbg_opal_putc(char c)
{
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb86ba49..08c87920b74a 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void)
return 0;
}
-module_init(hvc_rtas_init);
-
-/* This will tear down the tty portion of the driver */
-static void __exit hvc_rtas_exit(void)
-{
- /* Really the fun isn't over until the worker thread breaks down and
- * the tty cleans up */
- if (hvc_rtas_dev)
- hvc_remove(hvc_rtas_dev);
-}
-module_exit(hvc_rtas_exit);
+device_initcall(hvc_rtas_init);
/* This will happen prior to module init. There is no tty at this time? */
static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 72228276fe31..9cf573d06a29 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void)
return 0;
}
-module_init(hvc_udbg_init);
-
-static void __exit hvc_udbg_exit(void)
-{
- if (hvc_udbg_dev)
- hvc_remove(hvc_udbg_dev);
-}
-module_exit(hvc_udbg_exit);
+device_initcall(hvc_udbg_init);
static int __init hvc_udbg_console_init(void)
{
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9baad7a5..2dc2831840ca 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void)
#endif
return r;
}
-
-static void __exit xen_hvc_fini(void)
-{
- struct xencons_info *entry, *next;
-
- if (list_empty(&xenconsoles))
- return;
-
- list_for_each_entry_safe(entry, next, &xenconsoles, list) {
- xen_console_remove(entry);
- }
-}
+device_initcall(xen_hvc_init);
static int xen_cons_init(void)
{
@@ -598,10 +587,6 @@ static int xen_cons_init(void)
hvc_instantiate(HVC_COOKIE, 0, ops);
return 0;
}
-
-
-module_init(xen_hvc_init);
-module_exit(xen_hvc_fini);
console_initcall(xen_cons_init);
#ifdef CONFIG_EARLY_PRINTK
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f34461c5f14e..2ebe47b78a3e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
+ unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
u8 *dp = data;
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
+ if (len > 0) {
+ while (gsm_read_ea(&brk, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ modem <<= 7;
+ modem |= (brk & 0x7f);
+ }
tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
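
The new break-signal handling relies on the TS 07.10 EA (extension) encoding also used for the address and modem octets: each octet carries seven payload bits and bit 0, when set, marks the last octet of the field. A decoder sketch matching the gsm_read_ea() calling convention used above (illustrative, not lifted from the patch):

/* Accumulate one EA-encoded octet into *val; returns non-zero when
 * this was the final octet of the field (EA bit set). */
static int ea_read_octet(unsigned int *val, unsigned char c)
{
        *val <<= 7;
        *val |= c >> 1;
        return c & 0x01;
}

/* Typical use, mirroring the loop added above:
 *
 *      unsigned int brk = 0;
 *      while (ea_read_octet(&brk, *dp++) == 0)
 *              ;       (bounds checks omitted for brevity)
 */
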
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cb8017aa4434..d15624c1b751 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
size_t echoed;
- if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
- ldata->echo_mark == ldata->echo_tail)
+ if (ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
@@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
if (L_ECHO(tty)) {
echo_char(c, tty);
commit_echoes(tty);
- }
+ } else
+ process_echoes(tty);
isig(signal, tty);
return;
}
@@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
if (I_IXON(tty)) {
if (c == START_CHAR(tty)) {
start_tty(tty);
- commit_echoes(tty);
+ process_echoes(tty);
return 0;
}
if (c == STOP_CHAR(tty)) {
@@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
start_tty(tty);
+ process_echoes(tty);
+ }
/* The termios change make the tty ready for I/O */
if (waitqueue_active(&tty->write_wait))
@@ -1896,7 +1898,7 @@ err:
static inline int input_available_p(struct tty_struct *tty, int poll)
{
struct n_tty_data *ldata = tty->disc_data;
- int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1;
+ int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty)) {
if (ldata->canon_head != ldata->read_tail)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61ecd709a722..69932b7556cf 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_dl_write(up, quot);
/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD)
+ *
+ * We need to recalculate all of the registers, because DLM and DLL
+ * are already rounded to a whole integer.
+ *
+ * When recalculating we use a 32x clock instead of a 16x clock to
+ * allow 1-bit for rounding in the fractional part.
+ */
+ if (up->port.type == PORT_XR17V35X) {
+ unsigned int baud_x32 = (port->uartclk * 2) / baud;
+ u16 quot = baud_x32 / 32;
+ u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
+
+ serial_dl_write(up, quot);
+ serial_port_out(port, 0x2, quot_frac & 0xf);
+ }
+
+ /*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
* is written without DLAB set, this mode will be disabled.
*/
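
As a worked example of the fractional-divisor math above, with a purely hypothetical uartclk of 62.5 MHz and a requested rate of 115200 baud:

        baud_x32  = (62500000 * 2) / 115200         = 1085
        quot      = 1085 / 32                       = 33    (DLL/DLM)
        quot_frac = DIV_ROUND_CLOSEST(1085 % 32, 2) = 15    (DLD low nibble, in 16ths)

        effective divisor = 33 + 15/16 = 33.94, against an ideal of
        62500000 / (16 * 115200) = 33.91
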
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index faa64e646100..ed3113576740 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int dw8250_runtime_suspend(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 50228eed3b6f..0ff3e3624d4c 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv,
{
unsigned int bar;
- if ((priv->dev->subsystem_device & 0xff00) == 0x3000) {
+ if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) &&
+ (priv->dev->subsystem_device & 0xff00) == 0x3000) {
/* netmos apparently orders BARs by datasheet layout, so serial
* ports get BARs 0 and 3 (or 1 and 4 for memmapped)
*/
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa511ebab67c..77f035158d6c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port)
return retval;
}
disable_irq(up->wakeirq);
- } else {
- dev_info(up->port.dev, "no wakeirq for uart%d\n",
- up->port.line);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
flags & SER_RS485_RTS_AFTER_SEND);
if (ret < 0)
return ret;
- } else
+ } else if (up->rts_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else {
up->rts_gpio = -EINVAL;
+ }
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
@@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
up->wakeirq = wakeirq;
+ if (!up->wakeirq)
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
up->port.regshift = 2;
up->port.fifosize = 64;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 49a2ffd101a7..b7bfe24d4ebc 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -542,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
- sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ spin_lock(&port->lock);
+ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ spin_unlock(&port->lock);
if (sirfport->rx_io_count == 4) {
spin_lock_irqsave(&sirfport->rx_lock, flags);
sirfport->rx_io_count = 0;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c74a00ad7add..bd2715a9d8e5 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1267,16 +1267,17 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
* @p: output buffer of at least 7 bytes
*
* Generate a name from a driver reference and write it to the output
- * buffer.
+ * buffer. Return the number of bytes written.
*
* Locking: None
*/
-static void tty_line_name(struct tty_driver *driver, int index, char *p)
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
- strcpy(p, driver->name);
+ return sprintf(p, "%s", driver->name);
else
- sprintf(p, "%s%d", driver->name, index + driver->name_base);
+ return sprintf(p, "%s%d", driver->name,
+ index + driver->name_base);
}
/**
@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev,
if (i >= ARRAY_SIZE(cs))
break;
}
- while (i--)
- count += sprintf(buf + count, "%s%d%c",
- cs[i]->name, cs[i]->index, i ? ' ':'\n');
+ while (i--) {
+ struct tty_driver *driver;
+ const char *name = cs[i]->name;
+ int index = cs[i]->index;
+
+ driver = cs[i]->device(cs[i], &index);
+ if (driver) {
+ count += tty_line_name(driver, index, buf + count);
+ count += sprintf(buf + count, "%c", i ? ' ':'\n');
+ } else
+ count += sprintf(buf + count, "%s%d%c",
+ name, index, i ? ' ':'\n');
+ }
console_unlock();
return count;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 61b1137d7e56..23b5d32954bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
/* fall through */
case 2: /* erase whole display */
count = vc->vc_cols * vc->vc_rows;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5d01558cef66..ab90a0156828 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
if (fields > 2 && bInterfaceClass) {
- if (bInterfaceClass > 255)
- return -EINVAL;
+ if (bInterfaceClass > 255) {
+ retval = -EINVAL;
+ goto fail;
+ }
dynid->id.bInterfaceClass = (u8)bInterfaceClass;
dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
@@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (fields > 4) {
const struct usb_device_id *id = id_table;
- if (!id)
- return -ENODEV;
+ if (!id) {
+ retval = -ENODEV;
+ goto fail;
+ }
for (; id->match_flags; id++)
if (id->idVendor == refVendor && id->idProduct == refProduct)
break;
- if (id->match_flags)
+ if (id->match_flags) {
dynid->id.driver_info = id->driver_info;
- else
- return -ENODEV;
+ } else {
+ retval = -ENODEV;
+ goto fail;
+ }
}
spin_lock(&dynids->lock);
@@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (retval)
return retval;
return count;
+
+fail:
+ kfree(dynid);
+ return retval;
}
EXPORT_SYMBOL_GPL(usb_store_new_id);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 199aaea6bfe0..2518c3250750 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return retval;
}
- usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
}
retval = usb_new_device (usb_dev);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index babba885978d..64ea21971be2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-int usb_device_supports_lpm(struct usb_device *udev)
+static int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
@@ -149,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev)
"Power management will be impacted.\n");
return 0;
}
-
- /* udev is root hub */
- if (!udev->parent)
- return 1;
-
if (udev->parent->lpm_capable)
return 1;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c49383669cd8..823857767a16 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
-extern int usb_device_supports_lpm(struct usb_device *udev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8565d87f94b4..1d129884cc39 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
int retval = 0;
if (!select_phy)
- return -ENODEV;
+ return 0;
usbcfg = readl(hsotg->regs + GUSBCFG);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index f59484d43b35..4d918ed8d343 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int is_control = usb_endpoint_xfer_control(&ep->desc);
- int is_out = usb_endpoint_dir_out(&ep->desc);
- int epnum = usb_endpoint_num(&ep->desc);
- struct usb_device *udev;
unsigned long flags;
dev_dbg(hsotg->dev,
"DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
ep->desc.bEndpointAddress);
- udev = to_usb_device(hsotg->dev);
-
spin_lock_irqsave(&hsotg->lock, flags);
-
- usb_settoggle(udev, epnum, is_out, 0);
- if (is_control)
- usb_settoggle(udev, epnum, !is_out, 0);
dwc2_hcd_endpoint_reset(hsotg, ep);
-
spin_unlock_irqrestore(&hsotg->lock, flags);
}
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index d01d0d3f2cf0..eaba547ce26b 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
int retval;
int irq;
+ if (usb_disabled())
+ return -ENODEV;
+
match = of_match_device(dwc2_of_match_table, &dev->dev);
if (match && match->data) {
params = match->data;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index b016d38199f2..eb009a457fb5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
addr, (unsigned int)temp);
addr = &ir_set->erst_base;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
addr, temp_64);
addr = &ir_set->erst_dequeue;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
addr, temp_64);
}
@@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
{
u64 val;
- val = readq(&xhci->op_regs->cmd_ring);
+ val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
lower_32_bits(val));
xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 873c272b3ef5..bce4391a0e7d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1958,7 +1958,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = readq(&xhci->ir_set->erst_dequeue);
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
@@ -1967,7 +1967,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, "
"preserving EHB bit");
- writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
@@ -2269,7 +2269,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- writeq(dma, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
@@ -2312,13 +2312,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%x", val);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
@@ -2396,10 +2396,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = readq(&xhci->ir_set->erst_base);
+ val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- writeq(val_64, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 3c898c12a06b..04f986d9234f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xc0cd)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a0b248c34526..0ed64eb68e48 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
return 0;
}
- temp_64 = readq(&xhci->op_regs->cmd_ring);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (!(temp_64 & CMD_RING_RUNNING)) {
xhci_dbg(xhci, "Command ring had been stopped\n");
return 0;
}
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
- writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should
* time the completion of all xHCI commands, including
@@ -2864,8 +2865,9 @@ hw_died:
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
- writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64 | ERST_EHB,
+ &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
return IRQ_HANDLED;
@@ -2877,7 +2879,7 @@ hw_died:
*/
while (xhci_handle_event(xhci) > 0) {}
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != xhci->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -2892,7 +2894,7 @@ hw_died:
/* Clear the event handler busy flag (RW1C); event ring is empty. */
temp_64 |= ERST_EHB;
- writeq(temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
@@ -2965,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
while (1) {
- if (room_on_ring(xhci, ep_ring, num_trbs)) {
- union xhci_trb *trb = ep_ring->enqueue;
- unsigned int usable = ep_ring->enq_seg->trbs +
- TRBS_PER_SEGMENT - 1 - trb;
- u32 nop_cmd;
-
- /*
- * Section 4.11.7.1 TD Fragments states that a link
- * TRB must only occur at the boundary between
- * data bursts (eg 512 bytes for 480M).
- * While it is possible to split a large fragment
- * we don't know the size yet.
- * Simplest solution is to fill the trb before the
- * LINK with nop commands.
- */
- if (num_trbs == 1 || num_trbs <= usable || usable == 0)
- break;
-
- if (ep_ring->type != TYPE_BULK)
- /*
- * While isoc transfers might have a buffer that
- * crosses a 64k boundary it is unlikely.
- * Since we can't add NOPs without generating
- * gaps in the traffic just hope it never
- * happens at the end of the ring.
- * This could be fixed by writing a LINK TRB
- * instead of the first NOP - however the
- * TRB_TYPE_LINK_LE32() calls would all need
- * changing to check the ring length.
- */
- break;
-
- if (num_trbs >= TRBS_PER_SEGMENT) {
- xhci_err(xhci, "Too many fragments %d, max %d\n",
- num_trbs, TRBS_PER_SEGMENT - 1);
- return -EINVAL;
- }
-
- nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
- ep_ring->cycle_state);
- ep_ring->num_trbs_free -= usable;
- do {
- trb->generic.field[0] = 0;
- trb->generic.field[1] = 0;
- trb->generic.field[2] = 0;
- trb->generic.field[3] = nop_cmd;
- trb++;
- } while (--usable);
- ep_ring->enqueue = trb;
- if (room_on_ring(xhci, ep_ring, num_trbs))
- break;
- }
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad364394885a..6fe577d46fa2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -611,7 +611,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring);
xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
@@ -756,11 +756,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
{
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
- xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr);
+ xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
- xhci->s3.erst_base = readq(&xhci->ir_set->erst_base);
- xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue);
+ xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+ xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}
@@ -769,11 +769,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
{
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
- writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
- writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base);
- writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
@@ -783,7 +783,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
u64 val_64;
/* step 2: initialize command ring buffer */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue) &
@@ -792,7 +792,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%llx",
(long unsigned long) val_64);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
@@ -3842,7 +3842,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
if (ret) {
return ret;
}
- temp_64 = readq(&xhci->op_regs->dcbaa_ptr);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -4730,11 +4730,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
struct device *dev = hcd->self.controller;
int retval;
- /* Limit the block layer scatter-gather lists to half a segment. */
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2;
-
- /* support to build packet from discontinuous buffers */
- hcd->self.no_sg_constraint = 1;
+ /* Accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
@@ -4760,6 +4757,14 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
+ xhci = hcd_to_xhci(hcd);
+ /*
+ * Support arbitrarily aligned sg-list entries on hosts without
+ * TD fragment rules (which are currently unsupported).
+ */
+ if (xhci->hci_version < 0x100)
+ hcd->self.no_sg_constraint = 1;
+
return 0;
}
@@ -4788,6 +4793,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ if (xhci->hci_version < 0x100)
+ hcd->self.no_sg_constraint = 1;
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f8416639bf31..58ed9d088e63 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -28,17 +28,6 @@
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
-/*
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers. Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
- */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
-
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
@@ -1279,7 +1268,7 @@ union xhci_trb {
* since the command ring is 64-byte aligned.
* It must also be greater than 16.
*/
-#define TRBS_PER_SEGMENT 256
+#define TRBS_PER_SEGMENT 64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
@@ -1614,6 +1603,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
#define xhci_warn_ratelimited(xhci, fmt, args...) \
dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers. Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+ __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u64 val_lo = readl(ptr);
+ u64 val_hi = readl(ptr + 1);
+ return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+ const u64 val, __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u32 val_lo = lower_32_bits(val);
+ u32 val_hi = upper_32_bits(val);
+
+ writel(val_lo, ptr);
+ writel(val_hi, ptr + 1);
+}
+
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
return xhci->quirks & XHCI_LINK_TRB_QUIRK;
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index e6f61e4361df..8afa813d690b 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
phy = __usb_find_phy(&phy_list, type);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver of type %s\n",
+ pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
goto err0;
}
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver\n");
+ dev_dbg(dev, "unable to find transceiver\n");
goto err0;
}
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index,
unsigned long flags;
phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
- if (!phy_bind) {
- pr_err("phy_bind(): No memory for phy_bind");
+ if (!phy_bind)
return -ENOMEM;
- }
phy_bind->dev_name = dev_name;
phy_bind->phy_dev_name = phy_dev_name;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ce0d7b0db012..ee1f00f03c43 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..1e2d369df86e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
#define TI_XDS100V2_PID 0xa6d0
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
/* US Interface Navigator (http://www.usinterface.com/) */
#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID	0xF0E9 /* Tagsys L-P101 RFID */
+#define FTDI_TAGSYS_P200X_PID	0xF0EE /* Tagsys Medio P200x RFID */
+
/* an infrared receiver for user access control with IR tags */
#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5c86f57e4afa..216d20affba8 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c65437cfd4a2..968a40201e5f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index f112b079ddfc..fb79775447b0 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
- { USB_DEVICE(0x0fcf, 0x1008) }
+ { USB_DEVICE(0x0fcf, 0x1008) }, \
+ { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
/* Siemens USB/MPI adapter */
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8470e1b114f2..1dd0604d1911 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,7 +18,9 @@ config USB_STORAGE
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
+ (BLK_DEV_SD) for most USB storage devices. Some devices also
+ will require 'Probe all LUNs on each SCSI device'
+ (SCSI_MULTI_LUN).
To compile this driver as a module, choose M here: the
module will be called usb-storage.
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 18509e6c21ab..9d38ddc8da49 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
+ struct us_data *us = host_to_us(sdev->host);
+
/*
* Set the INQUIRY transfer length to 36. We don't use any of
* the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ /* Tell the SCSI layer if we know there is more than one LUN */
+ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+ sdev->sdev_bflags |= BLIST_FORCELUN;
+
return 0;
}
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75066a8..82e8ed0324e3 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ad06255c2ade..adbeb255616a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
+UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
+ "Research In Motion",
+ "BlackBerry Bold 9000",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9a68409580d5..a0fa5de210cf 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,12 @@ enum {
};
struct vhost_net_ubuf_ref {
- struct kref kref;
+ /* refcount follows semantics similar to kref:
+ * 0: object is released
+ * 1: no outstanding ubufs
+ * >1: outstanding ubufs
+ */
+ atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
};
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
vhost_net_zcopy_mask |= 0x1 << vq;
}
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
-{
- struct vhost_net_ubuf_ref *ubufs;
-
- ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
- wake_up(&ubufs->wait);
-}
-
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
if (!ubufs)
return ERR_PTR(-ENOMEM);
- kref_init(&ubufs->kref);
+ atomic_set(&ubufs->refcount, 1);
init_waitqueue_head(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+ int r = atomic_sub_return(1, &ubufs->refcount);
+ if (unlikely(!r))
+ wake_up(&ubufs->wait);
+ return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
- wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+ vhost_net_ubuf_put(ubufs);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
- int cnt = atomic_read(&ubufs->kref.refcount);
+ int cnt;
+
+ rcu_read_lock_bh();
/* set len to mark this desc's buffers as done with DMA */
vq->heads[ubuf->desc].len = success ?
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
+ cnt = vhost_net_ubuf_put(ubufs);
/*
* Trigger polling thread if guest stopped submitting new buffers:
- * in this case, the refcount after decrement will eventually reach 1
- * so here it is 2.
+ * in this case, the refcount after decrement will eventually reach 1.
* We also trigger polling periodically after each 16 packets
* (the value 16 here is more or less arbitrary, it's tuned to trigger
* less than 10% of times).
*/
- if (cnt <= 2 || !(cnt % 16))
+ if (cnt <= 1 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
+
+ rcu_read_unlock_bh();
}
/* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = ubuf;
msg.msg_controllen = sizeof(ubuf);
ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
+ atomic_inc(&ubufs->refcount);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
} else {
msg.msg_control = NULL;
@@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n)
vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
- kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+ atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
@@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
fput(tx_sock->file);
if (rx_sock)
fput(rx_sock->file);
+ /* Make sure no callbacks are outstanding */
+ synchronize_rcu_bh();
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
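
The vhost conversion above swaps the kref for a bare atomic whose value encodes the ubuf lifecycle (1 idle, greater than 1 outstanding, 0 released), with the final put waking whoever is flushing. The sketch below is a userspace model of that pattern using C11 atomics and a condition variable standing in for the kernel waitqueue; the names are illustrative and not taken from vhost.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* 1 = no outstanding buffers, >1 = buffers in flight, 0 = released */
static atomic_int refcount = 1;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;

static void ubuf_get(void)
{
        atomic_fetch_add(&refcount, 1);
}

static int ubuf_put(void)
{
        int r = atomic_fetch_sub(&refcount, 1) - 1;

        if (r == 0) {                   /* last reference: wake the flusher */
                pthread_mutex_lock(&lock);
                pthread_cond_broadcast(&wait_cv);
                pthread_mutex_unlock(&lock);
        }
        return r;
}

static void ubuf_put_and_wait(void)
{
        ubuf_put();                     /* drop the initial reference */
        pthread_mutex_lock(&lock);
        while (atomic_load(&refcount) != 0)
                pthread_cond_wait(&wait_cv, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        ubuf_get();                     /* zero-copy buffer submitted */
        ubuf_put();                     /* completion callback fired */
        ubuf_put_and_wait();            /* flush: waits until count hits 0 */
        printf("all buffers completed, refcount=%d\n", atomic_load(&refcount));
        return 0;
}
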
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22262a3a0e2d..dade5b7699bc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -364,7 +364,7 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
- depends on FB && IMX_HAVE_PLATFORM_IMX_FB
+ depends on FB && ARCH_MXC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e9e640..75c8a8e7efc0 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI
config EXYNOS_LCD_S6E8AX0
bool "S6E8AX0 MIPI AMOLED LCD Driver"
- depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE)
+ depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
+ depends on (LCD_CLASS_DEVICE = y)
default n
help
If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index bbeb8dd7f108..77d6221618f4 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
@@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
- u16 in_height = DIV_ROUND_UP(height, *decim_y);
+ u16 in_height = height / *decim_y;
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
do {
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_width = width / *decim_x;
} while (*decim_x <= *x_predecim &&
in_width > maxsinglelinewidth && ++*decim_x);
@@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (r)
return r;
- in_width = DIV_ROUND_UP(in_width, x_predecim);
- in_height = DIV_ROUND_UP(in_height, y_predecim);
+ in_width = in_width / x_predecim;
+ in_height = in_height / y_predecim;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY ||
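
The omapdss dispc hunks above change the predecimated size computation from rounding up to truncating. A quick worked example of the difference follows, with DIV_ROUND_UP() redefined locally for the demonstration; the height and decimation values are made up.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int height = 1079, decim_y = 4;

        /* 270 lines with round-up vs 269 lines with truncation */
        printf("round-up: %u, truncating: %u\n",
               DIV_ROUND_UP(height, decim_y), height / decim_y);
        return 0;
}
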
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 7411f2674e16..23ef21ffc2c4 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
struct dsi_clock_info dsi_cinfo;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index efb9ee9e3c96..ba806c9e7f54 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@ static struct {
struct sdi_clk_calc_ctx {
unsigned long pck_min, pck_max;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index a06edbfa95ca..1b5d48c578e1 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 16830d8b777c..9911cd5fddb5 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
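
Both VME bridge fixes above correct the same off-by-offset bug: the alignment test looked at the window base address instead of the current position, base + done, which the preceding single-byte step may already have advanced. The sketch below is a simplified userspace model of the corrected idea (a byte-then-16-bit ladder rather than the drivers' byte/16/32-bit one, with memcpy standing in for ioread16); all names are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy 'count' bytes, testing alignment of the *current* position
 * (src + done) rather than of the base pointer alone.
 */
static void aligned_copy(uint8_t *dst, const uint8_t *src, size_t count)
{
        size_t done = 0;

        /* single byte while the current source address is odd */
        if (done < count && ((uintptr_t)(src + done) & 0x1)) {
                dst[done] = src[done];
                done += 1;
        }
        /* 16-bit chunks now that (src + done) is 2-byte aligned */
        while (count - done >= 2) {
                memcpy(dst + done, src + done, 2);
                done += 2;
        }
        /* trailing byte, if any */
        if (done < count)
                dst[done] = src[done];
}

int main(void)
{
        uint8_t src[9] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }, dst[9] = { 0 };

        aligned_copy(dst, src + 1, 8);  /* deliberately misaligned base */
        printf("last byte copied: %d\n", dst[7]);
        return 0;
}
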
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d75c811bfa56..45e00afa7f2d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
-obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 4672e003c0ad..f4a9e3311297 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = ret;
goto out;
}
+ /* New interdomain events are bound to VCPU 0. */
+ bind_evtchn_to_cpu(evtchn, 0);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc594549..000000000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <xen/xencomm.h>
-#include <xen/interface/xen.h>
-#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
-
-static int xencomm_init(struct xencomm_desc *desc,
- void *buffer, unsigned long bytes)
-{
- unsigned long recorded = 0;
- int i = 0;
-
- while ((recorded < bytes) && (i < desc->nr_addrs)) {
- unsigned long vaddr = (unsigned long)buffer + recorded;
- unsigned long paddr;
- int offset;
- int chunksz;
-
- offset = vaddr % PAGE_SIZE; /* handle partial pages */
- chunksz = min(PAGE_SIZE - offset, bytes - recorded);
-
- paddr = xencomm_vtop(vaddr);
- if (paddr == ~0UL) {
- printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
- __func__, vaddr);
- return -EINVAL;
- }
-
- desc->address[i++] = paddr;
- recorded += chunksz;
- }
-
- if (recorded < bytes) {
- printk(KERN_DEBUG
- "%s: could only translate %ld of %ld bytes\n",
- __func__, recorded, bytes);
- return -ENOSPC;
- }
-
- /* mark remaining addresses invalid (just for safety) */
- while (i < desc->nr_addrs)
- desc->address[i++] = XENCOMM_INVALID;
-
- desc->magic = XENCOMM_MAGIC;
-
- return 0;
-}
-
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
- void *buffer, unsigned long bytes)
-{
- struct xencomm_desc *desc;
- unsigned long buffer_ulong = (unsigned long)buffer;
- unsigned long start = buffer_ulong & PAGE_MASK;
- unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
- unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
- unsigned long size = sizeof(*desc) +
- sizeof(desc->address[0]) * nr_addrs;
-
- /*
- * slab allocator returns at least sizeof(void*) aligned pointer.
- * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
- * cross page boundary.
- */
- if (sizeof(*desc) > sizeof(void *)) {
- unsigned long order = get_order(size);
- desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
- order);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs =
- ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
- sizeof(*desc->address);
- } else {
- desc = kmalloc(size, gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = nr_addrs;
- }
- return desc;
-}
-
-void xencomm_free(struct xencomm_handle *desc)
-{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
- struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
- if (sizeof(*desc__) > sizeof(void *)) {
- unsigned long size = sizeof(*desc__) +
- sizeof(desc__->address[0]) * desc__->nr_addrs;
- unsigned long order = get_order(size);
- free_pages((unsigned long)__va(desc), order);
- } else
- kfree(__va(desc));
- }
-}
-
-static int xencomm_create(void *buffer, unsigned long bytes,
- struct xencomm_desc **ret, gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
- int rc;
-
- pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
-
- if (bytes == 0) {
- /* don't create a descriptor; Xen recognizes NULL. */
- BUG_ON(buffer != NULL);
- *ret = NULL;
- return 0;
- }
-
- BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
-
- desc = xencomm_alloc(gfp_mask, buffer, bytes);
- if (!desc) {
- printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
- return -ENOMEM;
- }
-
- rc = xencomm_init(desc, buffer, bytes);
- if (rc) {
- printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
- xencomm_free((struct xencomm_handle *)__pa(desc));
- return rc;
- }
-
- *ret = desc;
- return 0;
-}
-
-static struct xencomm_handle *xencomm_create_inline(void *ptr)
-{
- unsigned long paddr;
-
- BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
-
- paddr = (unsigned long)xencomm_pa(ptr);
- BUG_ON(paddr & XENCOMM_INLINE_FLAG);
- return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
-}
-
-/* "mini" routine, for stack-based communications: */
-static int xencomm_create_mini(void *buffer,
- unsigned long bytes, struct xencomm_mini *xc_desc,
- struct xencomm_desc **ret)
-{
- int rc = 0;
- struct xencomm_desc *desc;
- BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
-
- desc = (void *)xc_desc;
-
- desc->nr_addrs = XENCOMM_MINI_ADDRS;
-
- rc = xencomm_init(desc, buffer, bytes);
- if (!rc)
- *ret = desc;
-
- return rc;
-}
-
-struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
-{
- int rc;
- struct xencomm_desc *desc;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
-
- if (rc || desc == NULL)
- return NULL;
-
- return xencomm_pa(desc);
-}
-
-struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
- struct xencomm_mini *xc_desc)
-{
- int rc;
- struct xencomm_desc *desc = NULL;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create_mini(ptr, bytes, xc_desc,
- &desc);
-
- if (rc)
- return NULL;
-
- return xencomm_pa(desc);
-}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0bad24ddc2e7..0129b78a6908 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -114,6 +114,14 @@ void bio_integrity_free(struct bio *bio)
}
EXPORT_SYMBOL(bio_integrity_free);
+static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
+{
+ if (bip->bip_slab == BIO_POOL_NONE)
+ return BIP_INLINE_VECS;
+
+ return bvec_nr_vecs(bip->bip_slab);
+}
+
/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
@@ -129,7 +137,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct bio_integrity_payload *bip = bio->bi_integrity;
struct bio_vec *iv;
- if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
+ if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
printk(KERN_ERR "%s: bip_vec full\n", __func__);
return 0;
}
@@ -226,7 +234,8 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
}
EXPORT_SYMBOL(bio_integrity_tag_size);
-int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
+static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
+ int set)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
diff --git a/fs/bio.c b/fs/bio.c
index 75c49a382239..8754e7b6eb49 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -611,7 +611,6 @@ EXPORT_SYMBOL(bio_clone_fast);
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
struct bio_set *bs)
{
- unsigned nr_iovecs = 0;
struct bvec_iter iter;
struct bio_vec bv;
struct bio *bio;
@@ -638,10 +637,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
* __bio_clone_fast() anyways.
*/
- bio_for_each_segment(bv, bio_src, iter)
- nr_iovecs++;
-
- bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
+ bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio)
return NULL;
@@ -650,9 +646,18 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
+ if (bio->bi_rw & REQ_DISCARD)
+ goto integrity_clone;
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+ bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
+ goto integrity_clone;
+ }
+
bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
+integrity_clone:
if (bio_integrity(bio_src)) {
int ret;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5215f04260b2..81ea55314b1f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3839,7 +3839,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
rb_erase(&ref->rb_node, &head->ref_root);
atomic_dec(&delayed_refs->num_entries);
btrfs_put_delayed_ref(ref);
- cond_resched_lock(&head->lock);
}
if (head->must_insert_reserved)
pin_bytes = true;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 184e9cb39647..d3d44486290b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5154,7 +5154,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_CAST(inode);
}
- return d_splice_alias(inode, dentry);
+ return d_materialise_unique(dentry, inode);
}
unsigned char btrfs_filetype_table[] = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 383ab455bfa7..a6d8efa46bfe 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3537,20 +3537,6 @@ out:
return ret;
}
-static long btrfs_ioctl_global_rsv(struct btrfs_root *root, void __user *arg)
-{
- struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
- u64 reserved;
-
- spin_lock(&block_rsv->lock);
- reserved = block_rsv->reserved;
- spin_unlock(&block_rsv->lock);
-
- if (arg && copy_to_user(arg, &reserved, sizeof(reserved)))
- return -EFAULT;
- return 0;
-}
-
/*
* there are many ways the trans_start and trans_end ioctls can lead
* to deadlocks. They should only be used by applications that
@@ -4757,8 +4743,6 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_logical_to_ino(root, argp);
case BTRFS_IOC_SPACE_INFO:
return btrfs_ioctl_space_info(root, argp);
- case BTRFS_IOC_GLOBAL_RSV:
- return btrfs_ioctl_global_rsv(root, argp);
case BTRFS_IOC_SYNC: {
int ret;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9c8d1a3fdc3a..9dde9717c1b9 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1332,6 +1332,16 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
}
if (cur_clone_root) {
+ if (compressed != BTRFS_COMPRESS_NONE) {
+ /*
+ * Offsets given by iterate_extent_inodes() are relative
+ * to the start of the extent, we need to add logical
+ * offset from the file extent item.
+ * (See why at backref.c:check_extent_in_eb())
+ */
+ cur_clone_root->offset += btrfs_file_extent_offset(eb,
+ fi);
+ }
*found = cur_clone_root;
ret = 0;
} else {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 97cc24198554..d04db817be5c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -566,7 +566,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
kfree(num);
if (info->max_inline) {
- info->max_inline = max_t(u64,
+ info->max_inline = min_t(u64,
info->max_inline,
root->sectorsize);
}
@@ -855,6 +855,7 @@ static struct dentry *get_default_root(struct super_block *sb,
struct btrfs_path *path;
struct btrfs_key location;
struct inode *inode;
+ struct dentry *dentry;
u64 dir_id;
int new = 0;
@@ -925,7 +926,13 @@ setup_root:
return dget(sb->s_root);
}
- return d_obtain_alias(inode);
+ dentry = d_obtain_alias(inode);
+ if (!IS_ERR(dentry)) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_DISCONNECTED;
+ spin_unlock(&dentry->d_lock);
+ }
+ return dentry;
}
static int btrfs_fill_super(struct super_block *sb,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 782374d8fd19..865f4cf9a769 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -578,8 +578,14 @@ static int add_device_membership(struct btrfs_fs_info *fs_info)
return -ENOMEM;
list_for_each_entry(dev, &fs_devices->devices, dev_list) {
- struct hd_struct *disk = dev->bdev->bd_part;
- struct kobject *disk_kobj = &part_to_dev(disk)->kobj;
+ struct hd_struct *disk;
+ struct kobject *disk_kobj;
+
+ if (!dev->bdev)
+ continue;
+
+ disk = dev->bdev->bd_part;
+ disk_kobj = &part_to_dev(disk)->kobj;
error = sysfs_create_link(fs_info->device_dir_kobj,
disk_kobj, disk_kobj->name);
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 4c2d452c4bfc..21887d63dad5 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -54,11 +54,6 @@ static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode,
return acl;
}
-void ceph_forget_all_cached_acls(struct inode *inode)
-{
- forget_all_cached_acls(inode);
-}
-
struct posix_acl *ceph_get_acl(struct inode *inode, int type)
{
int size;
@@ -160,11 +155,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
goto out_dput;
}
- if (value)
- ret = __ceph_setxattr(dentry, name, value, size, 0);
- else
- ret = __ceph_removexattr(dentry, name);
-
+ ret = __ceph_setxattr(dentry, name, value, size, 0);
if (ret) {
if (new_mode != old_mode) {
newattrs.ia_mode = old_mode;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6da4df84ba30..45eda6d7a40c 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -100,6 +100,14 @@ static unsigned fpos_off(loff_t p)
return p & 0xffffffff;
}
+static int fpos_cmp(loff_t l, loff_t r)
+{
+ int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
+ if (v)
+ return v;
+ return (int)(fpos_off(l) - fpos_off(r));
+}
+
/*
* When possible, we try to satisfy a readdir by peeking at the
* dcache. We make this work by carefully ordering dentries on
@@ -156,7 +164,7 @@ more:
if (!d_unhashed(dentry) && dentry->d_inode &&
ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
- ctx->pos <= di->offset)
+ fpos_cmp(ctx->pos, di->offset) <= 0)
break;
dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
dentry->d_name.len, dentry->d_name.name, di->offset,
@@ -695,9 +703,8 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
ceph_mdsc_put_request(req);
if (!err)
- err = ceph_init_acl(dentry, dentry->d_inode, dir);
-
- if (err)
+ ceph_init_acl(dentry, dentry->d_inode, dir);
+ else
d_drop(dentry);
return err;
}
@@ -735,7 +742,9 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
- if (err)
+ if (!err)
+ ceph_init_acl(dentry, dentry->d_inode, dir);
+ else
d_drop(dentry);
return err;
}
@@ -776,7 +785,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
out:
- if (err < 0)
+ if (!err)
+ ceph_init_acl(dentry, dentry->d_inode, dir);
+ else
d_drop(dentry);
return err;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index dfd2ce3419f8..09c7afe32e49 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -286,6 +286,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
} else {
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
+ ceph_init_acl(dentry, dentry->d_inode, dir);
*opened |= FILE_CREATED;
}
err = finish_open(file, dentry, ceph_open, opened);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2df963f1cf5a..10a4ccbf38da 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -144,7 +144,11 @@ enum {
Opt_ino32,
Opt_noino32,
Opt_fscache,
- Opt_nofscache
+ Opt_nofscache,
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ Opt_acl,
+#endif
+ Opt_noacl
};
static match_table_t fsopt_tokens = {
@@ -172,6 +176,10 @@ static match_table_t fsopt_tokens = {
{Opt_noino32, "noino32"},
{Opt_fscache, "fsc"},
{Opt_nofscache, "nofsc"},
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ {Opt_acl, "acl"},
+#endif
+ {Opt_noacl, "noacl"},
{-1, NULL}
};
@@ -271,6 +279,14 @@ static int parse_fsopt_token(char *c, void *private)
case Opt_nofscache:
fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
break;
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ case Opt_acl:
+ fsopt->sb_flags |= MS_POSIXACL;
+ break;
+#endif
+ case Opt_noacl:
+ fsopt->sb_flags &= ~MS_POSIXACL;
+ break;
default:
BUG_ON(token);
}
@@ -438,6 +454,13 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
else
seq_puts(m, ",nofsc");
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ if (fsopt->sb_flags & MS_POSIXACL)
+ seq_puts(m, ",acl");
+ else
+ seq_puts(m, ",noacl");
+#endif
+
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -819,9 +842,6 @@ static int ceph_set_super(struct super_block *s, void *data)
s->s_flags = fsc->mount_options->sb_flags;
s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
- s->s_flags |= MS_POSIXACL;
-#endif
s->s_xattr = ceph_xattr_handlers;
s->s_fs_info = fsc;
@@ -911,6 +931,10 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
struct ceph_options *opt = NULL;
dout("ceph_mount\n");
+
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ flags |= MS_POSIXACL;
+#endif
err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
if (err < 0) {
res = ERR_PTR(err);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 19793b56d0a7..d8801a95b685 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
+#include <linux/posix_acl.h>
#include <linux/ceph/libceph.h>
@@ -743,7 +744,11 @@ extern const struct xattr_handler *ceph_xattr_handlers[];
struct posix_acl *ceph_get_acl(struct inode *, int);
int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int ceph_init_acl(struct dentry *, struct inode *, struct inode *);
-void ceph_forget_all_cached_acls(struct inode *inode);
+
+static inline void ceph_forget_all_cached_acls(struct inode *inode)
+{
+ forget_all_cached_acls(inode);
+}
#else
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 898b6565ad3e..a55ec37378c6 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -12,6 +12,9 @@
#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
+static int __remove_xattr(struct ceph_inode_info *ci,
+ struct ceph_inode_xattr *xattr);
+
/*
* List of handlers for synthetic system.* attributes. Other
* attributes are handled directly.
@@ -319,8 +322,7 @@ static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
static int __set_xattr(struct ceph_inode_info *ci,
const char *name, int name_len,
const char *val, int val_len,
- int dirty,
- int should_free_name, int should_free_val,
+ int flags, int update_xattr,
struct ceph_inode_xattr **newxattr)
{
struct rb_node **p;
@@ -349,12 +351,31 @@ static int __set_xattr(struct ceph_inode_info *ci,
xattr = NULL;
}
+ if (update_xattr) {
+ int err = 0;
+ if (xattr && (flags & XATTR_CREATE))
+ err = -EEXIST;
+ else if (!xattr && (flags & XATTR_REPLACE))
+ err = -ENODATA;
+ if (err) {
+ kfree(name);
+ kfree(val);
+ return err;
+ }
+ if (update_xattr < 0) {
+ if (xattr)
+ __remove_xattr(ci, xattr);
+ kfree(name);
+ return 0;
+ }
+ }
+
if (!xattr) {
new = 1;
xattr = *newxattr;
xattr->name = name;
xattr->name_len = name_len;
- xattr->should_free_name = should_free_name;
+ xattr->should_free_name = update_xattr;
ci->i_xattrs.count++;
dout("__set_xattr count=%d\n", ci->i_xattrs.count);
@@ -364,7 +385,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
if (xattr->should_free_val)
kfree((void *)xattr->val);
- if (should_free_name) {
+ if (update_xattr) {
kfree((void *)name);
name = xattr->name;
}
@@ -379,8 +400,8 @@ static int __set_xattr(struct ceph_inode_info *ci,
xattr->val = "";
xattr->val_len = val_len;
- xattr->dirty = dirty;
- xattr->should_free_val = (val && should_free_val);
+ xattr->dirty = update_xattr;
+ xattr->should_free_val = (val && update_xattr);
if (new) {
rb_link_node(&xattr->node, parent, p);
@@ -442,7 +463,7 @@ static int __remove_xattr(struct ceph_inode_info *ci,
struct ceph_inode_xattr *xattr)
{
if (!xattr)
- return -EOPNOTSUPP;
+ return -ENODATA;
rb_erase(&xattr->node, &ci->i_xattrs.index);
@@ -588,7 +609,7 @@ start:
p += len;
err = __set_xattr(ci, name, namelen, val, len,
- 0, 0, 0, &xattrs[numattr]);
+ 0, 0, &xattrs[numattr]);
if (err < 0)
goto bad;
@@ -850,6 +871,9 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
dout("setxattr value=%.*s\n", (int)size, value);
+ if (!value)
+ flags |= CEPH_XATTR_REMOVE;
+
/* do request */
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
USE_AUTH_MDS);
@@ -892,7 +916,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
struct ceph_inode_info *ci = ceph_inode(inode);
int issued;
int err;
- int dirty;
+ int dirty = 0;
int name_len = strlen(name);
int val_len = size;
char *newname = NULL;
@@ -953,12 +977,14 @@ retry:
goto retry;
}
- err = __set_xattr(ci, newname, name_len, newval,
- val_len, 1, 1, 1, &xattr);
+ err = __set_xattr(ci, newname, name_len, newval, val_len,
+ flags, value ? 1 : -1, &xattr);
- dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
- ci->i_xattrs.dirty = true;
- inode->i_ctime = CURRENT_TIME;
+ if (!err) {
+ dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+ ci->i_xattrs.dirty = true;
+ inode->i_ctime = CURRENT_TIME;
+ }
spin_unlock(&ci->i_ceph_lock);
if (dirty)
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index c819b0bd491a..7ff866dbb89e 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -865,8 +865,8 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
return rc;
}
-static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
- __u16 fid, u32 *pacllen)
+struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ const struct cifs_fid *cifsfid, u32 *pacllen)
{
struct cifs_ntsd *pntsd = NULL;
unsigned int xid;
@@ -877,7 +877,8 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
return ERR_CAST(tlink);
xid = get_xid();
- rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
+ rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
+ pacllen);
free_xid(xid);
cifs_put_tlink(tlink);
@@ -946,7 +947,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
if (!open_file)
return get_cifs_acl_by_path(cifs_sb, path, pacllen);
- pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen);
+ pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
cifsFileInfo_put(open_file);
return pntsd;
}
@@ -1006,19 +1007,31 @@ out:
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
- struct inode *inode, const char *path, const __u16 *pfid)
+ struct inode *inode, const char *path,
+ const struct cifs_fid *pfid)
{
struct cifs_ntsd *pntsd = NULL;
u32 acllen = 0;
int rc = 0;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ struct cifs_tcon *tcon;
cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
- if (pfid)
- pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
- else
- pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+ if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
+ pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
+ &acllen);
+ else if (tcon->ses->server->ops->get_acl)
+ pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
+ &acllen);
+ else {
+ cifs_put_tlink(tlink);
+ return -EOPNOTSUPP;
+ }
/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
if (IS_ERR(pntsd)) {
rc = PTR_ERR(pntsd);
@@ -1030,6 +1043,8 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
}
+ cifs_put_tlink(tlink);
+
return rc;
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 86dc28c7aa5c..cf32f0393369 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -398,6 +398,8 @@ struct smb_version_operations {
const struct nls_table *, int);
struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
const char *, u32 *);
+ struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
+ const struct cifs_fid *, u32 *);
int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
int);
};
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d00e09dfc452..acc4ee8ed075 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -151,7 +151,7 @@ extern struct inode *cifs_iget(struct super_block *sb,
extern int cifs_get_inode_info(struct inode **inode, const char *full_path,
FILE_ALL_INFO *data, struct super_block *sb,
- int xid, const __u16 *fid);
+ int xid, const struct cifs_fid *fid);
extern int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path,
struct super_block *sb, unsigned int xid);
@@ -162,11 +162,13 @@ extern int cifs_rename_pending_delete(const char *full_path,
const unsigned int xid);
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode,
- const char *path, const __u16 *pfid);
+ const char *path, const struct cifs_fid *pfid);
extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
kuid_t, kgid_t);
extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
const char *, u32 *);
+extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
+ const struct cifs_fid *, u32 *);
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
const char *, int);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d3a6796caa5a..3db0c5fd9a11 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -378,7 +378,7 @@ cifs_create_get_file_info:
xid);
else {
rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
- xid, &fid->netfid);
+ xid, fid);
if (newinode) {
if (server->ops->set_lease_key)
server->ops->set_lease_key(newinode, fid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 755584684f6c..53c15074bb36 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -244,7 +244,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
xid);
else
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
- xid, &fid->netfid);
+ xid, fid);
out:
kfree(buf);
@@ -2389,7 +2389,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
unsigned long nr_pages, i;
- size_t copied, len, cur_len;
+ size_t bytes, copied, len, cur_len;
ssize_t total_written = 0;
loff_t offset;
struct iov_iter it;
@@ -2444,14 +2444,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
save_len = cur_len;
for (i = 0; i < nr_pages; i++) {
- copied = min_t(const size_t, cur_len, PAGE_SIZE);
+ bytes = min_t(const size_t, cur_len, PAGE_SIZE);
copied = iov_iter_copy_from_user(wdata->pages[i], &it,
- 0, copied);
+ 0, bytes);
cur_len -= copied;
iov_iter_advance(&it, copied);
+ /*
+ * If we didn't copy as much as we expected, then that
+ * may mean we trod into an unmapped area. Stop copying
+ * at that point. On the next pass through the big
+ * loop, we'll likely end up getting a zero-length
+ * write and bailing out of it.
+ */
+ if (copied < bytes)
+ break;
}
cur_len = save_len - cur_len;
+ /*
+ * If we have no data to send, then that probably means that
+ * the copy above failed altogether. That's most likely because
+ * the address in the iovec was bogus. Set the rc to -EFAULT,
+ * free anything we allocated and bail out.
+ */
+ if (!cur_len) {
+ for (i = 0; i < nr_pages; i++)
+ put_page(wdata->pages[i]);
+ kfree(wdata);
+ rc = -EFAULT;
+ break;
+ }
+
+ /*
+ * i + 1 now represents the number of pages we actually used in
+ * the copy phase above. Bring nr_pages down to that, and free
+ * any pages that we didn't use.
+ */
+ for ( ; nr_pages > i + 1; nr_pages--)
+ put_page(wdata->pages[nr_pages - 1]);
+
wdata->sync_mode = WB_SYNC_ALL;
wdata->nr_pages = nr_pages;
wdata->offset = (__u64)offset;
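
The cifs_iovec_write() hunk above rests on one idea: copy from the user iovec page by page, and on a short copy stop, keep only the pages that actually received data, and fail with -EFAULT only if nothing was copied at all. The userspace sketch below models that trimming logic; fill_pages() and copy_limit are hypothetical stand-ins for the wdata page array and a faulting user address.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/*
 * Copy 'len' bytes from 'src' into the page array, stopping early once
 * 'copy_limit' bytes have been taken (a stand-in for hitting an unmapped
 * user address).  Returns the number of pages holding data, or -EFAULT
 * if nothing at all could be copied.
 */
static int fill_pages(char pages[][PAGE_SIZE], int nr_pages,
                      const char *src, size_t len, size_t copy_limit)
{
        size_t taken = 0;
        int i, used = 0;

        for (i = 0; i < nr_pages && taken < len; i++) {
                size_t bytes = len - taken;
                size_t copied;

                if (bytes > PAGE_SIZE)
                        bytes = PAGE_SIZE;
                copied = bytes;
                if (taken + copied > copy_limit)        /* simulated fault */
                        copied = copy_limit > taken ? copy_limit - taken : 0;

                memcpy(pages[i], src + taken, copied);
                taken += copied;
                if (copied)
                        used = i + 1;   /* this page holds data */
                if (copied < bytes)
                        break;          /* short copy: stop here */
        }

        if (taken == 0)
                return -EFAULT;         /* the whole copy failed: bail out */
        /* pages [used, nr_pages) would be released before issuing the write */
        return used;
}

int main(void)
{
        static char pages[3][PAGE_SIZE];
        char src[10000];

        memset(src, 'x', sizeof(src));
        printf("pages used: %d\n",
               fill_pages(pages, 3, src, sizeof(src), 5000));
        return 0;
}
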
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index be58b8fcdb3c..aadc2b68678b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -677,7 +677,7 @@ cgfi_exit:
int
cifs_get_inode_info(struct inode **inode, const char *full_path,
FILE_ALL_INFO *data, struct super_block *sb, int xid,
- const __u16 *fid)
+ const struct cifs_fid *fid)
{
bool validinum = false;
__u16 srchflgs;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index bfd66d84831e..526fb89f9230 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1073,6 +1073,7 @@ struct smb_version_operations smb1_operations = {
#endif /* CIFS_XATTR */
#ifdef CONFIG_CIFS_ACL
.get_acl = get_cifs_acl,
+ .get_acl_by_fid = get_cifs_acl_by_fid,
.set_acl = set_cifs_acl,
#endif /* CIFS_ACL */
};
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index c38350851b08..bc0bb9c34f72 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -57,4 +57,7 @@
#define SMB2_CMACAES_SIZE (16)
#define SMB3_SIGNKEY_SIZE (16)
+/* Maximum buffer size value we can send with 1 credit */
+#define SMB2_MAX_BUFFER_SIZE 65536
+
#endif /* _SMB2_GLOB_H */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 757da3e54d3d..192f51a12cf1 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified wsize, or default */
wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
- /*
- * limit write size to 2 ** 16, because we don't support multicredit
- * requests now.
- */
- wsize = min_t(unsigned int, wsize, 2 << 15);
+ /* set it to the maximum buffer size value we can send with 1 credit */
+ wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
return wsize;
}
@@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified rsize, or default */
rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
- /*
- * limit write size to 2 ** 16, because we don't support multicredit
- * requests now.
- */
- rsize = min_t(unsigned int, rsize, 2 << 15);
+ /* set it to the maximum buffer size value we can send with 1 credit */
+ rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
return rsize;
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index a3f7a9c3cc69..860344701067 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
/* SMB2 only has an extended negflavor */
server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
- server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
+ /* set it to the maximum buffer size value we can send with 1 credit */
+ server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
+ SMB2_MAX_BUFFER_SIZE);
server->max_read = le32_to_cpu(rsp->MaxReadSize);
server->max_write = le32_to_cpu(rsp->MaxWriteSize);
/* BB Do we need to validate the SecurityMode? */
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ece55565b9cd..d3a534fdc5ff 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -771,6 +771,8 @@ do { \
if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
(einode)->xtime.tv_sec = \
(signed)le32_to_cpu((raw_inode)->xtime); \
+ else \
+ (einode)->xtime.tv_sec = 0; \
if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
ext4_decode_extra_time(&(einode)->xtime, \
raw_inode->xtime ## _extra); \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 10cff4736b11..74bc2d549c58 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3906,6 +3906,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
} else
err = ret;
map->m_flags |= EXT4_MAP_MAPPED;
+ map->m_pblk = newblock;
if (allocated > map->m_len)
allocated = map->m_len;
map->m_len = allocated;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 6bea80614d77..a2a837f00407 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -140,7 +140,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
if (IS_ERR(handle)) {
err = -EINVAL;
- goto swap_boot_out;
+ goto journal_err_out;
}
/* Protect extent tree against block allocations via delalloc */
@@ -198,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
ext4_double_up_write_data_sem(inode, inode_bl);
+journal_err_out:
ext4_inode_resume_unlocked_dio(inode);
ext4_inode_resume_unlocked_dio(inode_bl);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c5adbb318a90..f3b84cd9de56 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
ext4_group_t group;
ext4_group_t last_group;
unsigned overhead;
+ __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
BUG_ON(flex_gd->count == 0 || group_data == NULL);
@@ -266,7 +267,7 @@ next_group:
src_group++;
for (; src_group <= last_group; src_group++) {
overhead = ext4_group_overhead_blocks(sb, src_group);
- if (overhead != 0)
+ if (overhead == 0)
last_blk += group_data[src_group - group].blocks_count;
else
break;
@@ -280,8 +281,7 @@ next_group:
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
group_data[group].free_blocks_count--;
- if (flexbg_size > 1)
- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ flex_gd->bg_flags[group] &= uninit_mask;
}
/* Allocate inode bitmaps */
@@ -292,22 +292,30 @@ next_group:
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
group_data[group].free_blocks_count--;
- if (flexbg_size > 1)
- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ flex_gd->bg_flags[group] &= uninit_mask;
}
/* Allocate inode tables */
for (; it_index < flex_gd->count; it_index++) {
- if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
+ unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
+ ext4_fsblk_t next_group_start;
+
+ if (start_blk + itb > last_blk)
goto next_group;
group_data[it_index].inode_table = start_blk;
- group = ext4_get_group_number(sb, start_blk - 1);
+ group = ext4_get_group_number(sb, start_blk);
+ next_group_start = ext4_group_first_block_no(sb, group + 1);
group -= group_data[0].group;
- group_data[group].free_blocks_count -=
- EXT4_SB(sb)->s_itb_per_group;
- if (flexbg_size > 1)
- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ if (start_blk + itb > next_group_start) {
+ flex_gd->bg_flags[group + 1] &= uninit_mask;
+ overhead = start_blk + itb - next_group_start;
+ group_data[group + 1].free_blocks_count -= overhead;
+ itb -= overhead;
+ }
+
+ group_data[group].free_blocks_count -= itb;
+ flex_gd->bg_flags[group] &= uninit_mask;
start_blk += EXT4_SB(sb)->s_itb_per_group;
}
@@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
start = ext4_group_first_block_no(sb, group);
group -= flex_gd->groups[0].group;
- count2 = sb->s_blocksize * 8 - (block - start);
+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
if (count2 > count)
count2 = count;
@@ -620,7 +628,7 @@ handle_ib:
if (err)
goto out;
count = group_table_count[j];
- start = group_data[i].block_bitmap;
+ start = (&group_data[i].block_bitmap)[j];
block = start;
}
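
A small idiom runs through the resize.c hunk above: rather than guarding every flag update with if (flexbg_size > 1), a mask is computed once, either ~EXT4_BG_BLOCK_UNINIT or an all-ones no-op, and ANDed in unconditionally. A standalone illustration of the idiom follows; the flag value is illustrative, not necessarily the one ext4 uses.

#include <stdint.h>
#include <stdio.h>

#define BG_BLOCK_UNINIT 0x0002  /* illustrative flag bit */

int main(void)
{
        int sizes[2] = { 1, 16 };       /* flexbg_size of 1 vs. 16 */

        for (int i = 0; i < 2; i++) {
                uint16_t mask = (sizes[i] > 1) ? (uint16_t)~BG_BLOCK_UNINIT
                                               : (uint16_t)0xffff;
                uint16_t flags = 0x0007;

                flags &= mask;  /* clears the bit only when flexbg_size > 1 */
                printf("flexbg_size=%2d -> flags=0x%04x\n", sizes[i], flags);
        }
        return 0;
}
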
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1f7784de05b6..710fed2377d4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3695,16 +3695,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
sbi->s_def_hash_version = es->s_def_hash_version;
- i = le32_to_cpu(es->s_flags);
- if (i & EXT2_FLAGS_UNSIGNED_HASH)
- sbi->s_hash_unsigned = 3;
- else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+ if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ i = le32_to_cpu(es->s_flags);
+ if (i & EXT2_FLAGS_UNSIGNED_HASH)
+ sbi->s_hash_unsigned = 3;
+ else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
- es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
- sbi->s_hash_unsigned = 3;
+ if (!(sb->s_flags & MS_RDONLY))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+ sbi->s_hash_unsigned = 3;
#else
- es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+ if (!(sb->s_flags & MS_RDONLY))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
+ }
}
/* Handle clustersize */
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e1959efad64f..b5ebc2d7d80d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj)
struct fscache_object *xobj;
struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
+ ASSERT(RB_EMPTY_NODE(&obj->objlist_link));
+
write_lock(&fscache_object_list_lock);
while (*p) {
@@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj)
*/
void fscache_objlist_remove(struct fscache_object *obj)
{
+ if (RB_EMPTY_NODE(&obj->objlist_link))
+ return;
+
write_lock(&fscache_object_list_lock);
BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 53d35c504240..d3b4539f1651 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object,
object->cache = cache;
object->cookie = cookie;
object->parent = NULL;
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+ RB_CLEAR_NODE(&object->objlist_link);
+#endif
object->oob_event_mask = 0;
for (t = object->oob_table; t->events; t++)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 8360674c85bc..60bb365f54a5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
* similarly constrained call sites
*/
ret = start_this_handle(journal, handle, GFP_NOFS);
- if (ret < 0)
+ if (ret < 0) {
jbd2_journal_free_reserved(handle);
+ return ret;
+ }
handle->h_type = type;
handle->h_line_no = line_no;
- return ret;
+ return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index e973b85d6afd..5a8ea16eedbc 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -86,6 +86,8 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
rc = posix_acl_equiv_mode(acl, &inode->i_mode);
if (rc < 0)
return rc;
+ inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
if (rc == 0)
acl = NULL;
break;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e066a3902973..ab798a88ec1d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -779,6 +779,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
struct nlm_file *file = block->b_file;
struct nlm_lock *lock = &block->b_call->a_args.lock;
int error;
+ loff_t fl_start, fl_end;
dprintk("lockd: grant blocked lock %p\n", block);
@@ -796,9 +797,16 @@ nlmsvc_grant_blocked(struct nlm_block *block)
}
/* Try the lock operation again */
+ /* vfs_lock_file() can mangle fl_start and fl_end, but we need
+ * them unchanged for the GRANT_MSG
+ */
lock->fl.fl_flags |= FL_SLEEP;
+ fl_start = lock->fl.fl_start;
+ fl_end = lock->fl.fl_end;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.fl_start = fl_start;
+ lock->fl.fl_end = fl_end;
switch (error) {
case 0:
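
The lockd fix above saves fl_start and fl_end around vfs_lock_file() because that call may rewrite the range, while the later GRANT_MSG still needs the original values. Below is a minimal userspace sketch of the save-and-restore pattern, with a stub standing in for vfs_lock_file() and field names merely mirroring the kernel struct.

#include <stdio.h>

struct file_lock { long long fl_start, fl_end; };

/* stands in for an API that may rewrite the range it is given */
static int try_lock(struct file_lock *fl)
{
        fl->fl_start = 0;       /* pretend the lock was coalesced */
        fl->fl_end = -1;
        return 0;
}

int main(void)
{
        struct file_lock fl = { .fl_start = 100, .fl_end = 199 };
        long long start = fl.fl_start, end = fl.fl_end; /* save */
        int err = try_lock(&fl);

        fl.fl_start = start;    /* restore what the caller still needs */
        fl.fl_end = end;
        printf("err=%d range=[%lld,%lld]\n", err, fl.fl_start, fl.fl_end);
        return 0;
}
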
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index d3a587144222..d190e33d0ec2 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -151,17 +151,15 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
if (IS_ERR(pacl))
return PTR_ERR(pacl);
- /* allocate for worst case: one (deny, allow) pair each: */
- size += 2 * pacl->a_count;
}
+ /* allocate for worst case: one (deny, allow) pair each: */
+ size += 2 * pacl->a_count;
if (S_ISDIR(inode->i_mode)) {
flags = NFS4_ACL_DIR;
dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
if (dpacl)
size += 2 * dpacl->a_count;
- } else {
- dpacl = NULL;
}
*acl = nfs4_acl_new(size);
@@ -170,8 +168,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
goto out;
}
- if (pacl)
- _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
+ _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
if (dpacl)
_posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 2b7882b508db..9a3c68cf6026 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -324,23 +324,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
switch (flag) {
case M_INSERT: /* insert item into L[0] */
- if (item_pos == tb->lnum[0] - 1
- && tb->lbytes != -1) {
+ if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
/* part of new item falls into L[0] */
int new_item_len;
int version;
- ret_val =
- leaf_shift_left(tb, tb->lnum[0] - 1,
- -1);
+ ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
/* Calculate item length to insert to S[0] */
- new_item_len =
- ih_item_len(ih) - tb->lbytes;
+ new_item_len = ih_item_len(ih) - tb->lbytes;
/* Calculate and check item length to insert to L[0] */
- put_ih_item_len(ih,
- ih_item_len(ih) -
- new_item_len);
+ put_ih_item_len(ih, ih_item_len(ih) - new_item_len);
RFALSE(ih_item_len(ih) <= 0,
"PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
@@ -349,30 +343,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* Insert new item into L[0] */
buffer_info_init_left(tb, &bi);
leaf_insert_into_buf(&bi,
- n + item_pos -
- ret_val, ih, body,
- zeros_num >
- ih_item_len(ih) ?
- ih_item_len(ih) :
- zeros_num);
+ n + item_pos - ret_val, ih, body,
+ zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num);
version = ih_version(ih);
/* Calculate key component, item length and body to insert into S[0] */
- set_le_ih_k_offset(ih,
- le_ih_k_offset(ih) +
- (tb->
- lbytes <<
- (is_indirect_le_ih
- (ih) ? tb->tb_sb->
- s_blocksize_bits -
- UNFM_P_SHIFT :
- 0)));
+ set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
+ (tb-> lbytes << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
put_ih_item_len(ih, new_item_len);
if (tb->lbytes > zeros_num) {
- body +=
- (tb->lbytes - zeros_num);
+ body += (tb->lbytes - zeros_num);
zeros_num = 0;
} else
zeros_num -= tb->lbytes;
@@ -383,15 +365,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
} else {
/* new item in whole falls into L[0] */
/* Shift lnum[0]-1 items to L[0] */
- ret_val =
- leaf_shift_left(tb, tb->lnum[0] - 1,
- tb->lbytes);
+ ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
/* Insert new item into L[0] */
buffer_info_init_left(tb, &bi);
- leaf_insert_into_buf(&bi,
- n + item_pos -
- ret_val, ih, body,
- zeros_num);
+ leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num);
tb->insert_size[0] = 0;
zeros_num = 0;
}
@@ -399,264 +376,117 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
case M_PASTE: /* append item in L[0] */
- if (item_pos == tb->lnum[0] - 1
- && tb->lbytes != -1) {
+ if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
/* we must shift the part of the appended item */
- if (is_direntry_le_ih
- (B_N_PITEM_HEAD(tbS0, item_pos))) {
+ if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
RFALSE(zeros_num,
"PAP-12090: invalid parameter in case of a directory");
/* directory item */
if (tb->lbytes > pos_in_item) {
/* new directory entry falls into L[0] */
- struct item_head
- *pasted;
- int l_pos_in_item =
- pos_in_item;
+ struct item_head *pasted;
+ int l_pos_in_item = pos_in_item;
/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
- ret_val =
- leaf_shift_left(tb,
- tb->
- lnum
- [0],
- tb->
- lbytes
- -
- 1);
- if (ret_val
- && !item_pos) {
- pasted =
- B_N_PITEM_HEAD
- (tb->L[0],
- B_NR_ITEMS
- (tb->
- L[0]) -
- 1);
- l_pos_in_item +=
- I_ENTRY_COUNT
- (pasted) -
- (tb->
- lbytes -
- 1);
+ ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1);
+ if (ret_val && !item_pos) {
+ pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
+ l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1);
}
/* Append given directory entry to directory item */
buffer_info_init_left(tb, &bi);
- leaf_paste_in_buffer
- (&bi,
- n + item_pos -
- ret_val,
- l_pos_in_item,
- tb->insert_size[0],
- body, zeros_num);
+ leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num);
/* previous string prepared space for pasting new entry, following string pastes this entry */
/* when we have merge directory item, pos_in_item has been changed too */
/* paste new directory entry. 1 is entry number */
- leaf_paste_entries(&bi,
- n +
- item_pos
- -
- ret_val,
- l_pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)
- body,
- body
- +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item,
+ 1, (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
tb->insert_size[0] = 0;
} else {
/* new directory item doesn't fall into L[0] */
/* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
- leaf_shift_left(tb,
- tb->
- lnum[0],
- tb->
- lbytes);
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
}
/* Calculate new position to append in item body */
pos_in_item -= tb->lbytes;
} else {
/* regular object */
- RFALSE(tb->lbytes <= 0,
- "PAP-12095: there is nothing to shift to L[0]. lbytes=%d",
- tb->lbytes);
- RFALSE(pos_in_item !=
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)),
+ RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes);
+ RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
"PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)),
- pos_in_item);
+ ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item);
if (tb->lbytes >= pos_in_item) {
/* appended item will be in L[0] in whole */
int l_n;
/* this bytes number must be appended to the last item of L[h] */
- l_n =
- tb->lbytes -
- pos_in_item;
+ l_n = tb->lbytes - pos_in_item;
/* Calculate new insert_size[0] */
- tb->insert_size[0] -=
- l_n;
+ tb->insert_size[0] -= l_n;
- RFALSE(tb->
- insert_size[0] <=
- 0,
+ RFALSE(tb->insert_size[0] <= 0,
"PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
- tb->
- insert_size[0]);
- ret_val =
- leaf_shift_left(tb,
- tb->
- lnum
- [0],
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0,
- item_pos)));
+ tb->insert_size[0]);
+ ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len
+ (B_N_PITEM_HEAD(tbS0, item_pos)));
/* Append to body of item in L[0] */
buffer_info_init_left(tb, &bi);
leaf_paste_in_buffer
- (&bi,
- n + item_pos -
- ret_val,
- ih_item_len
- (B_N_PITEM_HEAD
- (tb->L[0],
- n + item_pos -
- ret_val)), l_n,
- body,
- zeros_num >
- l_n ? l_n :
- zeros_num);
+ (&bi, n + item_pos - ret_val, ih_item_len
+ (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)),
+ l_n, body,
+ zeros_num > l_n ? l_n : zeros_num);
/* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
{
int version;
- int temp_l =
- l_n;
-
- RFALSE
- (ih_item_len
- (B_N_PITEM_HEAD
- (tbS0,
- 0)),
+ int temp_l = l_n;
+
+ RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)),
"PAP-12106: item length must be 0");
- RFALSE
- (comp_short_le_keys
- (B_N_PKEY
- (tbS0, 0),
- B_N_PKEY
- (tb->L[0],
- n +
- item_pos
- -
- ret_val)),
+ RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY
+ (tb->L[0], n + item_pos - ret_val)),
"PAP-12107: items must be of the same file");
if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) {
- temp_l =
- l_n
- <<
- (tb->
- tb_sb->
- s_blocksize_bits
- -
- UNFM_P_SHIFT);
+ temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT);
}
/* update key of first item in S0 */
- version =
- ih_version
- (B_N_PITEM_HEAD
- (tbS0, 0));
- set_le_key_k_offset
- (version,
- B_N_PKEY
- (tbS0, 0),
- le_key_k_offset
- (version,
- B_N_PKEY
- (tbS0,
- 0)) +
- temp_l);
+ version = ih_version(B_N_PITEM_HEAD(tbS0, 0));
+ set_le_key_k_offset(version, B_N_PKEY(tbS0, 0),
+ le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l);
/* update left delimiting key */
- set_le_key_k_offset
- (version,
- B_N_PDELIM_KEY
- (tb->
- CFL[0],
- tb->
- lkey[0]),
- le_key_k_offset
- (version,
- B_N_PDELIM_KEY
- (tb->
- CFL[0],
- tb->
- lkey[0]))
- + temp_l);
+ set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
+ le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l);
}
/* Calculate new body, position in item and insert_size[0] */
if (l_n > zeros_num) {
- body +=
- (l_n -
- zeros_num);
+ body += (l_n - zeros_num);
zeros_num = 0;
} else
- zeros_num -=
- l_n;
+ zeros_num -= l_n;
pos_in_item = 0;
- RFALSE
- (comp_short_le_keys
- (B_N_PKEY(tbS0, 0),
- B_N_PKEY(tb->L[0],
- B_NR_ITEMS
- (tb->
- L[0]) -
- 1))
- ||
- !op_is_left_mergeable
- (B_N_PKEY(tbS0, 0),
- tbS0->b_size)
- ||
- !op_is_left_mergeable
- (B_N_PDELIM_KEY
- (tb->CFL[0],
- tb->lkey[0]),
- tbS0->b_size),
+ RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1))
+ || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)
+ || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size),
"PAP-12120: item must be merge-able with left neighboring item");
} else { /* only part of the appended item will be in L[0] */
/* Calculate position in item for append in S[0] */
- pos_in_item -=
- tb->lbytes;
+ pos_in_item -= tb->lbytes;
- RFALSE(pos_in_item <= 0,
- "PAP-12125: no place for paste. pos_in_item=%d",
- pos_in_item);
+ RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item);
/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
- leaf_shift_left(tb,
- tb->
- lnum[0],
- tb->
- lbytes);
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
}
}
} else { /* appended item will be in L[0] in whole */
@@ -665,52 +495,30 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */
/* then increment pos_in_item by the size of the last item in L[0] */
- pasted =
- B_N_PITEM_HEAD(tb->L[0],
- n - 1);
+ pasted = B_N_PITEM_HEAD(tb->L[0], n - 1);
if (is_direntry_le_ih(pasted))
- pos_in_item +=
- ih_entry_count
- (pasted);
+ pos_in_item += ih_entry_count(pasted);
else
- pos_in_item +=
- ih_item_len(pasted);
+ pos_in_item += ih_item_len(pasted);
}
/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
- ret_val =
- leaf_shift_left(tb, tb->lnum[0],
- tb->lbytes);
+ ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
/* Append to body of item in L[0] */
buffer_info_init_left(tb, &bi);
- leaf_paste_in_buffer(&bi,
- n + item_pos -
- ret_val,
+ leaf_paste_in_buffer(&bi, n + item_pos - ret_val,
pos_in_item,
tb->insert_size[0],
body, zeros_num);
/* if appended item is directory, paste entry */
- pasted =
- B_N_PITEM_HEAD(tb->L[0],
- n + item_pos -
- ret_val);
+ pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val);
if (is_direntry_le_ih(pasted))
- leaf_paste_entries(&bi,
- n +
- item_pos -
- ret_val,
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, n + item_pos - ret_val,
+ pos_in_item, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE,
+ tb->insert_size[0]);
/* if appended item is indirect item, put unformatted node into un list */
if (is_indirect_le_ih(pasted))
set_ih_free_space(pasted, 0);
@@ -722,13 +530,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
reiserfs_panic(tb->tb_sb, "PAP-12130",
"lnum > 0: unexpected mode: "
" %s(%d)",
- (flag ==
- M_DELETE) ? "DELETE" : ((flag ==
- M_CUT)
- ? "CUT"
- :
- "UNKNOWN"),
- flag);
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
}
} else {
/* new item doesn't fall into L[0] */
@@ -748,14 +550,12 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
case M_INSERT: /* insert item */
if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */
if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */
- loff_t old_key_comp, old_len,
- r_zeros_number;
+ loff_t old_key_comp, old_len, r_zeros_number;
const char *r_body;
int version;
loff_t offset;
- leaf_shift_right(tb, tb->rnum[0] - 1,
- -1);
+ leaf_shift_right(tb, tb->rnum[0] - 1, -1);
version = ih_version(ih);
/* Remember key component and item length */
@@ -763,29 +563,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
old_len = ih_item_len(ih);
/* Calculate key component and item length to insert into R[0] */
- offset =
- le_ih_k_offset(ih) +
- ((old_len -
- tb->
- rbytes) << (is_indirect_le_ih(ih)
- ? tb->tb_sb->
- s_blocksize_bits -
- UNFM_P_SHIFT : 0));
+ offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0));
set_le_ih_k_offset(ih, offset);
put_ih_item_len(ih, tb->rbytes);
/* Insert part of the item into R[0] */
buffer_info_init_right(tb, &bi);
if ((old_len - tb->rbytes) > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + (old_len -
- tb->rbytes) -
- zeros_num;
+ r_body = body + (old_len - tb->rbytes) - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - (old_len -
- tb->rbytes);
+ r_zeros_number = zeros_num - (old_len - tb->rbytes);
zeros_num -= r_zeros_number;
}
@@ -798,25 +586,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* Calculate key component and item length to insert into S[0] */
set_le_ih_k_offset(ih, old_key_comp);
- put_ih_item_len(ih,
- old_len - tb->rbytes);
+ put_ih_item_len(ih, old_len - tb->rbytes);
tb->insert_size[0] -= tb->rbytes;
} else { /* whole new item falls into R[0] */
/* Shift rnum[0]-1 items to R[0] */
- ret_val =
- leaf_shift_right(tb,
- tb->rnum[0] - 1,
- tb->rbytes);
+ ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
/* Insert new item into R[0] */
buffer_info_init_right(tb, &bi);
- leaf_insert_into_buf(&bi,
- item_pos - n +
- tb->rnum[0] - 1,
- ih, body,
- zeros_num);
+ leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1,
+ ih, body, zeros_num);
if (item_pos - n + tb->rnum[0] - 1 == 0) {
replace_key(tb, tb->CFR[0],
@@ -841,200 +622,97 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
RFALSE(zeros_num,
"PAP-12145: invalid parameter in case of a directory");
- entry_count =
- I_ENTRY_COUNT(B_N_PITEM_HEAD
- (tbS0,
- item_pos));
+ entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD
+ (tbS0, item_pos));
if (entry_count - tb->rbytes <
pos_in_item)
/* new directory entry falls into R[0] */
{
int paste_entry_position;
- RFALSE(tb->rbytes - 1 >=
- entry_count
- || !tb->
- insert_size[0],
+ RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0],
"PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
- tb->rbytes,
- entry_count);
+ tb->rbytes, entry_count);
/* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
- leaf_shift_right(tb,
- tb->
- rnum
- [0],
- tb->
- rbytes
- - 1);
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
/* Paste given directory entry to directory item */
- paste_entry_position =
- pos_in_item -
- entry_count +
- tb->rbytes - 1;
+ paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
buffer_info_init_right(tb, &bi);
- leaf_paste_in_buffer
- (&bi, 0,
- paste_entry_position,
- tb->insert_size[0],
- body, zeros_num);
+ leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num);
/* paste entry */
- leaf_paste_entries(&bi,
- 0,
- paste_entry_position,
- 1,
- (struct
- reiserfs_de_head
- *)
- body,
- body
- +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
-
- if (paste_entry_position
- == 0) {
+ leaf_paste_entries(&bi, 0, paste_entry_position, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
+
+ if (paste_entry_position == 0) {
/* change delimiting keys */
- replace_key(tb,
- tb->
- CFR
- [0],
- tb->
- rkey
- [0],
- tb->
- R
- [0],
- 0);
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0);
}
tb->insert_size[0] = 0;
pos_in_item++;
} else { /* new directory entry doesn't fall into R[0] */
- leaf_shift_right(tb,
- tb->
- rnum
- [0],
- tb->
- rbytes);
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
}
} else { /* regular object */
- int n_shift, n_rem,
- r_zeros_number;
+ int n_shift, n_rem, r_zeros_number;
const char *r_body;
/* Calculate number of bytes which must be shifted from appended item */
- if ((n_shift =
- tb->rbytes -
- tb->insert_size[0]) < 0)
+ if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0)
n_shift = 0;
- RFALSE(pos_in_item !=
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)),
+ RFALSE(pos_in_item != ih_item_len
+ (B_N_PITEM_HEAD(tbS0, item_pos)),
"PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
- pos_in_item,
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)));
-
- leaf_shift_right(tb,
- tb->rnum[0],
- n_shift);
+ pos_in_item, ih_item_len
+ (B_N_PITEM_HEAD(tbS0, item_pos)));
+
+ leaf_shift_right(tb, tb->rnum[0], n_shift);
/* Calculate number of bytes which must remain in body after appending to R[0] */
- if ((n_rem =
- tb->insert_size[0] -
- tb->rbytes) < 0)
+ if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0)
n_rem = 0;
{
int version;
- unsigned long temp_rem =
- n_rem;
-
- version =
- ih_version
- (B_N_PITEM_HEAD
- (tb->R[0], 0));
- if (is_indirect_le_key
- (version,
- B_N_PKEY(tb->R[0],
- 0))) {
- temp_rem =
- n_rem <<
- (tb->tb_sb->
- s_blocksize_bits
- -
- UNFM_P_SHIFT);
+ unsigned long temp_rem = n_rem;
+
+ version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0));
+ if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) {
+ temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT);
}
- set_le_key_k_offset
- (version,
- B_N_PKEY(tb->R[0],
- 0),
- le_key_k_offset
- (version,
- B_N_PKEY(tb->R[0],
- 0)) +
- temp_rem);
- set_le_key_k_offset
- (version,
- B_N_PDELIM_KEY(tb->
- CFR
- [0],
- tb->
- rkey
- [0]),
- le_key_k_offset
- (version,
- B_N_PDELIM_KEY
- (tb->CFR[0],
- tb->rkey[0])) +
- temp_rem);
+ set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0),
+ le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem);
+ set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]),
+ le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem);
}
/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
- do_balance_mark_internal_dirty
- (tb, tb->CFR[0], 0);
+ do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
/* Append part of body into R[0] */
buffer_info_init_right(tb, &bi);
if (n_rem > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + n_rem -
- zeros_num;
+ r_body = body + n_rem - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - n_rem;
- zeros_num -=
- r_zeros_number;
+ r_zeros_number = zeros_num - n_rem;
+ zeros_num -= r_zeros_number;
}
- leaf_paste_in_buffer(&bi, 0,
- n_shift,
- tb->
- insert_size
- [0] -
- n_rem,
- r_body,
- r_zeros_number);
-
- if (is_indirect_le_ih
- (B_N_PITEM_HEAD
- (tb->R[0], 0))) {
+ leaf_paste_in_buffer(&bi, 0, n_shift,
+ tb->insert_size[0] - n_rem,
+ r_body, r_zeros_number);
+
+ if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) {
#if 0
RFALSE(n_rem,
"PAP-12160: paste more than one unformatted node pointer");
#endif
- set_ih_free_space
- (B_N_PITEM_HEAD
- (tb->R[0], 0), 0);
+ set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0);
}
tb->insert_size[0] = n_rem;
if (!n_rem)
@@ -1044,58 +722,28 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
struct item_head *pasted;
- ret_val =
- leaf_shift_right(tb, tb->rnum[0],
- tb->rbytes);
+ ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
/* append item in R[0] */
if (pos_in_item >= 0) {
buffer_info_init_right(tb, &bi);
- leaf_paste_in_buffer(&bi,
- item_pos -
- n +
- tb->
- rnum[0],
- pos_in_item,
- tb->
- insert_size
- [0], body,
- zeros_num);
+ leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item,
+ tb->insert_size[0], body, zeros_num);
}
/* paste new entry, if item is directory item */
- pasted =
- B_N_PITEM_HEAD(tb->R[0],
- item_pos - n +
- tb->rnum[0]);
- if (is_direntry_le_ih(pasted)
- && pos_in_item >= 0) {
- leaf_paste_entries(&bi,
- item_pos -
- n +
- tb->rnum[0],
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
+ if (is_direntry_le_ih(pasted) && pos_in_item >= 0) {
+ leaf_paste_entries(&bi, item_pos - n + tb->rnum[0],
+ pos_in_item, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
if (!pos_in_item) {
- RFALSE(item_pos - n +
- tb->rnum[0],
+ RFALSE(item_pos - n + tb->rnum[0],
"PAP-12165: directory item must be first item of node when pasting is in 0th position");
/* update delimiting keys */
- replace_key(tb,
- tb->CFR[0],
- tb->rkey[0],
- tb->R[0],
- 0);
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
}
}
@@ -1111,22 +759,16 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
default: /* cases d and t */
reiserfs_panic(tb->tb_sb, "PAP-12175",
"rnum > 0: unexpected mode: %s(%d)",
- (flag ==
- M_DELETE) ? "DELETE" : ((flag ==
- M_CUT) ? "CUT"
- : "UNKNOWN"),
- flag);
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
}
}
/* tb->rnum[0] > 0 */
RFALSE(tb->blknum[0] > 3,
- "PAP-12180: blknum can not be %d. It must be <= 3",
- tb->blknum[0]);
+ "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
RFALSE(tb->blknum[0] < 0,
- "PAP-12185: blknum can not be %d. It must be >= 0",
- tb->blknum[0]);
+ "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
/* if while adding to a node we discover that it is possible to split
it in two, and merge the left part into the left neighbor and the
@@ -1177,8 +819,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */
if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */
- int old_key_comp, old_len,
- r_zeros_number;
+ int old_key_comp, old_len, r_zeros_number;
const char *r_body;
int version;
@@ -1192,15 +833,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
old_len = ih_item_len(ih);
/* Calculate key component and item length to insert into S_new[i] */
- set_le_ih_k_offset(ih,
- le_ih_k_offset(ih) +
- ((old_len -
- sbytes[i]) <<
- (is_indirect_le_ih
- (ih) ? tb->tb_sb->
- s_blocksize_bits -
- UNFM_P_SHIFT :
- 0)));
+ set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
+ ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
put_ih_item_len(ih, sbytes[i]);
@@ -1209,39 +843,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
if ((old_len - sbytes[i]) > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + (old_len -
- sbytes[i]) -
- zeros_num;
+ r_body = body + (old_len - sbytes[i]) - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - (old_len -
- sbytes[i]);
+ r_zeros_number = zeros_num - (old_len - sbytes[i]);
zeros_num -= r_zeros_number;
}
- leaf_insert_into_buf(&bi, 0, ih, r_body,
- r_zeros_number);
+ leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number);
/* Calculate key component and item length to insert into S[i] */
set_le_ih_k_offset(ih, old_key_comp);
- put_ih_item_len(ih,
- old_len - sbytes[i]);
+ put_ih_item_len(ih, old_len - sbytes[i]);
tb->insert_size[0] -= sbytes[i];
} else { /* whole new item falls into S_new[i] */
/* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
- snum[i] - 1, sbytes[i],
- S_new[i]);
+ snum[i] - 1, sbytes[i], S_new[i]);
/* Insert new item into S_new[i] */
buffer_info_init_bh(tb, &bi, S_new[i]);
- leaf_insert_into_buf(&bi,
- item_pos - n +
- snum[i] - 1, ih,
- body, zeros_num);
+ leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1,
+ ih, body, zeros_num);
zeros_num = tb->insert_size[0] = 0;
}
@@ -1268,150 +892,73 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
int entry_count;
- entry_count =
- ih_entry_count(aux_ih);
+ entry_count = ih_entry_count(aux_ih);
- if (entry_count - sbytes[i] <
- pos_in_item
- && pos_in_item <=
- entry_count) {
+ if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) {
/* new directory entry falls into S_new[i] */
- RFALSE(!tb->
- insert_size[0],
- "PAP-12215: insert_size is already 0");
- RFALSE(sbytes[i] - 1 >=
- entry_count,
+ RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0");
+ RFALSE(sbytes[i] - 1 >= entry_count,
"PAP-12220: there are no so much entries (%d), only %d",
- sbytes[i] - 1,
- entry_count);
+ sbytes[i] - 1, entry_count);
/* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
- leaf_move_items
- (LEAF_FROM_S_TO_SNEW,
- tb, snum[i],
- sbytes[i] - 1,
- S_new[i]);
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]);
/* Paste given directory entry to directory item */
buffer_info_init_bh(tb, &bi, S_new[i]);
- leaf_paste_in_buffer
- (&bi, 0,
- pos_in_item -
- entry_count +
- sbytes[i] - 1,
- tb->insert_size[0],
- body, zeros_num);
+ leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
+ tb->insert_size[0], body, zeros_num);
/* paste new directory entry */
- leaf_paste_entries(&bi,
- 0,
- pos_in_item
- -
- entry_count
- +
- sbytes
- [i] -
- 1, 1,
- (struct
- reiserfs_de_head
- *)
- body,
- body
- +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
tb->insert_size[0] = 0;
pos_in_item++;
} else { /* new directory entry doesn't fall into S_new[i] */
- leaf_move_items
- (LEAF_FROM_S_TO_SNEW,
- tb, snum[i],
- sbytes[i],
- S_new[i]);
+ leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]);
}
} else { /* regular object */
- int n_shift, n_rem,
- r_zeros_number;
+ int n_shift, n_rem, r_zeros_number;
const char *r_body;
- RFALSE(pos_in_item !=
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos))
- || tb->insert_size[0] <=
- 0,
+ RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0,
"PAP-12225: item too short or insert_size <= 0");
/* Calculate number of bytes which must be shifted from appended item */
- n_shift =
- sbytes[i] -
- tb->insert_size[0];
+ n_shift = sbytes[i] - tb->insert_size[0];
if (n_shift < 0)
n_shift = 0;
- leaf_move_items
- (LEAF_FROM_S_TO_SNEW, tb,
- snum[i], n_shift,
- S_new[i]);
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
/* Calculate number of bytes which must remain in body after append to S_new[i] */
- n_rem =
- tb->insert_size[0] -
- sbytes[i];
+ n_rem = tb->insert_size[0] - sbytes[i];
if (n_rem < 0)
n_rem = 0;
/* Append part of body into S_new[0] */
buffer_info_init_bh(tb, &bi, S_new[i]);
if (n_rem > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + n_rem -
- zeros_num;
+ r_body = body + n_rem - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - n_rem;
- zeros_num -=
- r_zeros_number;
+ r_zeros_number = zeros_num - n_rem;
+ zeros_num -= r_zeros_number;
}
- leaf_paste_in_buffer(&bi, 0,
- n_shift,
- tb->
- insert_size
- [0] -
- n_rem,
- r_body,
- r_zeros_number);
+ leaf_paste_in_buffer(&bi, 0, n_shift,
+ tb->insert_size[0] - n_rem,
+ r_body, r_zeros_number);
{
struct item_head *tmp;
- tmp =
- B_N_PITEM_HEAD(S_new
- [i],
- 0);
+ tmp = B_N_PITEM_HEAD(S_new[i], 0);
if (is_indirect_le_ih
(tmp)) {
- set_ih_free_space
- (tmp, 0);
- set_le_ih_k_offset
- (tmp,
- le_ih_k_offset
- (tmp) +
- (n_rem <<
- (tb->
- tb_sb->
- s_blocksize_bits
- -
- UNFM_P_SHIFT)));
+ set_ih_free_space(tmp, 0);
+ set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT)));
} else {
- set_le_ih_k_offset
- (tmp,
- le_ih_k_offset
- (tmp) +
- n_rem);
+ set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem);
}
}
@@ -1426,8 +973,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
struct item_head *pasted;
#ifdef CONFIG_REISERFS_CHECK
- struct item_head *ih_check =
- B_N_PITEM_HEAD(tbS0, item_pos);
+ struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos);
if (!is_direntry_le_ih(ih_check)
&& (pos_in_item != ih_item_len(ih_check)
@@ -1439,8 +985,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
"to ih_item_len");
#endif /* CONFIG_REISERFS_CHECK */
- leaf_mi =
- leaf_move_items(LEAF_FROM_S_TO_SNEW,
+ leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW,
tb, snum[i],
sbytes[i],
S_new[i]);
@@ -1452,30 +997,19 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* paste into item */
buffer_info_init_bh(tb, &bi, S_new[i]);
leaf_paste_in_buffer(&bi,
- item_pos - n +
- snum[i],
+ item_pos - n + snum[i],
pos_in_item,
tb->insert_size[0],
body, zeros_num);
- pasted =
- B_N_PITEM_HEAD(S_new[i],
- item_pos - n +
- snum[i]);
+ pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
if (is_direntry_le_ih(pasted)) {
leaf_paste_entries(&bi,
- item_pos -
- n + snum[i],
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
+ item_pos - n + snum[i],
+ pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE,
+ tb->insert_size[0]
);
}
@@ -1495,11 +1029,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
default: /* cases d and t */
reiserfs_panic(tb->tb_sb, "PAP-12245",
"blknum > 2: unexpected mode: %s(%d)",
- (flag ==
- M_DELETE) ? "DELETE" : ((flag ==
- M_CUT) ? "CUT"
- : "UNKNOWN"),
- flag);
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
}
memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
@@ -1524,9 +1054,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* If we insert the first key change the delimiting key */
if (item_pos == 0) {
if (tb->CFL[0]) /* can be 0 in reiserfsck */
- replace_key(tb, tb->CFL[0], tb->lkey[0],
- tbS0, 0);
-
+ replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
}
break;
@@ -1536,53 +1064,27 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
pasted = B_N_PITEM_HEAD(tbS0, item_pos);
/* when directory, may be new entry already pasted */
if (is_direntry_le_ih(pasted)) {
- if (pos_in_item >= 0 &&
- pos_in_item <=
- ih_entry_count(pasted)) {
+ if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) {
RFALSE(!tb->insert_size[0],
"PAP-12260: insert_size is 0 already");
/* prepare space */
buffer_info_init_tbS0(tb, &bi);
- leaf_paste_in_buffer(&bi,
- item_pos,
- pos_in_item,
- tb->
- insert_size
- [0], body,
+ leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
+ tb->insert_size[0], body,
zeros_num);
/* paste entry */
- leaf_paste_entries(&bi,
- item_pos,
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, item_pos, pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE,
+ tb->insert_size[0]);
if (!item_pos && !pos_in_item) {
- RFALSE(!tb->CFL[0]
- || !tb->L[0],
+ RFALSE(!tb->CFL[0] || !tb->L[0],
"PAP-12270: CFL[0]/L[0] must be specified");
- if (tb->CFL[0]) {
- replace_key(tb,
- tb->
- CFL
- [0],
- tb->
- lkey
- [0],
- tbS0,
- 0);
-
- }
+ if (tb->CFL[0])
+ replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
}
tb->insert_size[0] = 0;
}
@@ -1593,13 +1095,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
"PAP-12275: insert size must not be %d",
tb->insert_size[0]);
buffer_info_init_tbS0(tb, &bi);
- leaf_paste_in_buffer(&bi,
- item_pos,
- pos_in_item,
- tb->
- insert_size
- [0], body,
- zeros_num);
+ leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
+ tb->insert_size[0], body, zeros_num);
if (is_indirect_le_ih(pasted)) {
#if 0
@@ -1611,8 +1108,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
tb->
insert_size[0]);
#endif
- set_ih_free_space
- (pasted, 0);
+ set_ih_free_space(pasted, 0);
}
tb->insert_size[0] = 0;
}
@@ -1620,8 +1116,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
else {
if (tb->insert_size[0]) {
print_cur_tb("12285");
- reiserfs_panic(tb->
- tb_sb,
+ reiserfs_panic(tb->tb_sb,
"PAP-12285",
"insert_size "
"must be 0 "
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e4f41d9af4d..34c7bdc06014 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -701,6 +701,18 @@ static inline pte_t pte_mknuma(pte_t pte)
}
#endif
+#ifndef ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t ptent = *ptep;
+
+ ptent = pte_mknuma(ptent);
+ set_pte_at(mm, addr, ptep, ptent);
+ return;
+}
+#endif
+
#ifndef pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
@@ -708,6 +720,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
#endif
+
+#ifndef pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmd = pmd_mknuma(pmd);
+ set_pmd_at(mm, addr, pmdp, pmd);
+ return;
+}
+#endif
#else
extern int pte_numa(pte_t pte);
extern int pmd_numa(pmd_t pmd);
@@ -715,6 +739,8 @@ extern pte_t pte_mknonnuma(pte_t pte);
extern pmd_t pmd_mknonnuma(pmd_t pmd);
extern pte_t pte_mknuma(pte_t pte);
extern pmd_t pmd_mknuma(pmd_t pmd);
+extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
#else
static inline int pmd_numa(pmd_t pmd)
@@ -742,10 +768,23 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte;
}
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return;
+}
+
+
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
return pmd;
}
+
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ return ;
+}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_MMU */
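
The new ptep_set_numa()/pmdp_set_numa() helpers bundle the read-modify-write that NUMA-hinting callers previously open-coded, which lets an architecture override them later with a single atomic update. A sketch of the call-site conversion this enables (the real conversions live in mm/, not in this header):

	/* before: open-coded at the call site */
	pte_t ptent = *ptep;
	ptent = pte_mknuma(ptent);
	set_pte_at(mm, addr, ptep, ptent);

	/* after: one call, overridable per architecture */
	ptep_set_numa(mm, addr, ptep);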
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 04086c5be930..04a7f31301f8 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -199,6 +199,9 @@ int drm_err(const char *func, const char *format, ...);
#define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_INFO_ONCE(fmt, ...) \
+ printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
/**
* Debug output.
*
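
DRM_INFO_ONCE() mirrors DRM_INFO() but routes through printk_once(), so a repeatedly-hit path only logs on the first occurrence. A hedged usage sketch; the condition is only illustrative:

	/* note a missing capability once, not on every page flip attempt */
	if (!dev->mode_config.async_page_flip)
		DRM_INFO_ONCE("async page flips not supported, using synchronous flips\n");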
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 71727b6210ae..8f3dee097579 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -907,6 +907,9 @@ struct drm_mode_config {
/* whether async page flip is supported or not */
bool async_page_flip;
+
+ /* cursor size */
+ uint32_t cursor_width, cursor_height;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
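
The cursor_width/cursor_height fields give drivers one place to advertise their hardware cursor limits, which the core can then report to userspace instead of everyone assuming 64x64. A minimal sketch of a driver filling them in at mode-config init time (the 256x256 values are purely illustrative):

	drm_mode_config_init(dev);
	dev->mode_config.cursor_width = 256;
	dev->mode_config.cursor_height = 256;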
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index d1f61bfe0ebe..49a828425fa2 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -29,6 +29,8 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_memory.h>
+struct device;
+
/**
* Initialize pool allocator.
*/
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 70654521dab6..5a4d39b4686b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -250,6 +250,17 @@ static inline unsigned bio_segments(struct bio *bio)
struct bio_vec bv;
struct bvec_iter iter;
+ /*
+ * We special case discard/write same, because they interpret bi_size
+ * differently:
+ */
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
bio_for_each_segment(bv, bio, iter)
segs++;
@@ -332,6 +343,7 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio_set *fs_bio_set;
+unsigned int bio_integrity_tag_size(struct bio *bio);
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 161b23105b1e..18ba8a627f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -83,6 +83,8 @@ struct blk_mq_ops {
*/
rq_timed_out_fn *timeout;
+ softirq_done_fn *complete;
+
/*
* Override for hctx allocations (should probably go)
*/
@@ -119,11 +121,12 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_insert_request(struct request_queue *, struct request *,
+ bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
@@ -133,6 +136,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
void blk_mq_end_io(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq);
+
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
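
The new ->complete hook plus blk_mq_complete_request() let a blk-mq driver move the heavy part of completion out of hard-IRQ context: the interrupt handler calls blk_mq_complete_request(), and the core later invokes the driver's ->complete callback (a softirq_done_fn). A hedged sketch; everything outside the blk-mq symbols is hypothetical:

	static void mydrv_complete_rq(struct request *rq)
	{
		/* runs from the block layer's completion path, not hard IRQ */
		blk_mq_end_io(rq, rq->errors ? -EIO : 0);
	}

	static struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq	= mydrv_queue_rq,	/* hypothetical */
		.map_queue	= blk_mq_map_queue,
		.complete	= mydrv_complete_rq,
	};

	/* in the device interrupt handler */
	blk_mq_complete_request(rq);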
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8678c4322b44..4afa4f8f6090 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -98,7 +98,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
- struct work_struct mq_flush_data;
+ struct work_struct mq_flush_work;
};
struct request_queue *q;
@@ -448,13 +448,8 @@ struct request_queue {
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
- union {
- struct request flush_rq;
- struct {
- spinlock_t mq_flush_lock;
- struct work_struct mq_flush_work;
- };
- };
+ struct request *flush_rq;
+ spinlock_t mq_flush_lock;
struct mutex sysfs_lock;
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 677b4f01b2d0..6f76277baf39 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,10 +13,17 @@
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM57780 0x03625d90
+#define PHY_ID_BCM7366 0x600d8490
+#define PHY_ID_BCM7439 0x600d8480
+#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7XXX_28 0x600d8400
+
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
#define PHY_BCM_OUI_2 0x0143bc00
#define PHY_BCM_OUI_3 0x03625c00
+#define PHY_BCM_OUI_4 0x600d0000
+#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
@@ -31,6 +38,59 @@
#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
+/* Broadcom BCM7xxx specific workarounds */
+#define PHY_BRCM_100MBPS_WAR 0x00010000
#define PHY_BCM_FLAGS_VALID 0x80000000
+/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
+#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
+#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
+#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+
+#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
+#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
+
+#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
+#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
+
+#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
+#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
+#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
+#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
+#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
+#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
+#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
+#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
+#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
+#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
+#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
+#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
+#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
+#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
+#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
+#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
+#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
+#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
+
+#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
+#define MII_BCM54XX_SHD_WRITE 0x8000
+#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
+#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
+
+/*
+ * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
+ */
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
+#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
+
+#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
+
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+
#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index fb0ab651a041..dc5f9026b67f 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -124,6 +124,8 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
struct sk_buff *alloc_can_err_skb(struct net_device *dev,
struct can_frame **cf);
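
alloc_canfd_skb() is the CAN FD counterpart of alloc_can_skb(): it allocates the skb and hands back a pointer to the embedded struct canfd_frame. A hedged rx-path sketch; the hardware accessors and buffer are hypothetical:

	struct canfd_frame *cfd;
	struct sk_buff *skb;

	skb = alloc_canfd_skb(dev, &cfd);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	cfd->can_id = mydrv_read_id(priv);		/* hypothetical */
	cfd->len = can_dlc2len(mydrv_read_dlc(priv));	/* hypothetical */
	memcpy(cfd->data, rx_buf, cfd->len);		/* hypothetical buffer */

	netif_receive_skb(skb);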
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 2623cffc73a1..25bfb0eff772 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -373,8 +373,9 @@ extern const char *ceph_mds_op_name(int op);
/*
* Ceph setxattr request flags.
*/
-#define CEPH_XATTR_CREATE 1
-#define CEPH_XATTR_REPLACE 2
+#define CEPH_XATTR_CREATE (1 << 0)
+#define CEPH_XATTR_REPLACE (1 << 1)
+#define CEPH_XATTR_REMOVE (1 << 31)
union ceph_mds_request_args {
struct {
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index ded429966c1f..2507fd2a1eb4 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -75,11 +75,7 @@
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
-#if GCC_VERSION <= 40801
-# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-#else
-# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
-#endif
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index dfac5ed31120..f886985a28b2 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -171,7 +171,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *);
#define dma_buf_export(priv, ops, size, flags) \
- dma_buf_export_named(priv, ops, size, flags, __FILE__)
+ dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c8e3e7e39c6b..0a114d05f68d 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -183,6 +183,9 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* hold the RTNL lock.
*
* See the structures used by these operations for further documentation.
+ * Note that for all operations using a structure ending with a zero-
+ * length array, the array is allocated separately in the kernel and
+ * is passed to the driver as an additional parameter.
*
* See &struct net_device and &struct net_device_ops for documentation
* of the generic netdev features interface.
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 4d34dbbbad4d..7a8144fef406 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -4,8 +4,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
-#ifdef CONFIG_GPIOLIB
-
struct device;
struct gpio_chip;
@@ -18,6 +16,8 @@ struct gpio_chip;
*/
struct gpio_desc;
+#ifdef CONFIG_GPIOLIB
+
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 15da677478dd..344883dce584 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -875,7 +875,7 @@ struct vmbus_channel_relid_released {
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
- u32 padding2;
+ u32 target_vcpu; /* The VCPU the host should respond to */
u64 interrupt_page;
u64 monitor_page1;
u64 monitor_page2;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index e526a8cecb70..5f349355ee54 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -597,6 +597,20 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @fc: frame control field in little-endian byteorder
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+{
+ /* IEEE 802.11-2012, definition of "bufferable management frame";
+ * note that this ignores the IBSS special case. */
+ return ieee80211_is_mgmt(fc) &&
+ (ieee80211_is_action(fc) ||
+ ieee80211_is_disassoc(fc) ||
+ ieee80211_is_deauth(fc));
+}
+
+/**
* ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
* @seq_ctrl: frame sequence control bytes in little-endian byteorder
*/
@@ -1636,51 +1650,22 @@ enum ieee80211_reasoncode {
enum ieee80211_eid {
WLAN_EID_SSID = 0,
WLAN_EID_SUPP_RATES = 1,
- WLAN_EID_FH_PARAMS = 2,
+ WLAN_EID_FH_PARAMS = 2, /* reserved now */
WLAN_EID_DS_PARAMS = 3,
WLAN_EID_CF_PARAMS = 4,
WLAN_EID_TIM = 5,
WLAN_EID_IBSS_PARAMS = 6,
- WLAN_EID_CHALLENGE = 16,
-
WLAN_EID_COUNTRY = 7,
WLAN_EID_HP_PARAMS = 8,
WLAN_EID_HP_TABLE = 9,
WLAN_EID_REQUEST = 10,
-
WLAN_EID_QBSS_LOAD = 11,
WLAN_EID_EDCA_PARAM_SET = 12,
WLAN_EID_TSPEC = 13,
WLAN_EID_TCLAS = 14,
WLAN_EID_SCHEDULE = 15,
- WLAN_EID_TS_DELAY = 43,
- WLAN_EID_TCLAS_PROCESSING = 44,
- WLAN_EID_QOS_CAPA = 46,
- /* 802.11z */
- WLAN_EID_LINK_ID = 101,
- /* 802.11s */
- WLAN_EID_MESH_CONFIG = 113,
- WLAN_EID_MESH_ID = 114,
- WLAN_EID_LINK_METRIC_REPORT = 115,
- WLAN_EID_CONGESTION_NOTIFICATION = 116,
- WLAN_EID_PEER_MGMT = 117,
- WLAN_EID_CHAN_SWITCH_PARAM = 118,
- WLAN_EID_MESH_AWAKE_WINDOW = 119,
- WLAN_EID_BEACON_TIMING = 120,
- WLAN_EID_MCCAOP_SETUP_REQ = 121,
- WLAN_EID_MCCAOP_SETUP_RESP = 122,
- WLAN_EID_MCCAOP_ADVERT = 123,
- WLAN_EID_MCCAOP_TEARDOWN = 124,
- WLAN_EID_GANN = 125,
- WLAN_EID_RANN = 126,
- WLAN_EID_PREQ = 130,
- WLAN_EID_PREP = 131,
- WLAN_EID_PERR = 132,
- WLAN_EID_PXU = 137,
- WLAN_EID_PXUC = 138,
- WLAN_EID_AUTH_MESH_PEER_EXCH = 139,
- WLAN_EID_MIC = 140,
-
+ WLAN_EID_CHALLENGE = 16,
+ /* 17-31 reserved for challenge text extension */
WLAN_EID_PWR_CONSTRAINT = 32,
WLAN_EID_PWR_CAPABILITY = 33,
WLAN_EID_TPC_REQUEST = 34,
@@ -1691,66 +1676,114 @@ enum ieee80211_eid {
WLAN_EID_MEASURE_REPORT = 39,
WLAN_EID_QUIET = 40,
WLAN_EID_IBSS_DFS = 41,
-
WLAN_EID_ERP_INFO = 42,
- WLAN_EID_EXT_SUPP_RATES = 50,
-
+ WLAN_EID_TS_DELAY = 43,
+ WLAN_EID_TCLAS_PROCESSING = 44,
WLAN_EID_HT_CAPABILITY = 45,
- WLAN_EID_HT_OPERATION = 61,
- WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62,
-
+ WLAN_EID_QOS_CAPA = 46,
+ /* 47 reserved for Broadcom */
WLAN_EID_RSN = 48,
- WLAN_EID_MMIE = 76,
- WLAN_EID_VENDOR_SPECIFIC = 221,
- WLAN_EID_QOS_PARAMETER = 222,
-
+ WLAN_EID_802_15_COEX = 49,
+ WLAN_EID_EXT_SUPP_RATES = 50,
WLAN_EID_AP_CHAN_REPORT = 51,
WLAN_EID_NEIGHBOR_REPORT = 52,
WLAN_EID_RCPI = 53,
+ WLAN_EID_MOBILITY_DOMAIN = 54,
+ WLAN_EID_FAST_BSS_TRANSITION = 55,
+ WLAN_EID_TIMEOUT_INTERVAL = 56,
+ WLAN_EID_RIC_DATA = 57,
+ WLAN_EID_DSE_REGISTERED_LOCATION = 58,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
+ WLAN_EID_EXT_CHANSWITCH_ANN = 60,
+ WLAN_EID_HT_OPERATION = 61,
+ WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62,
WLAN_EID_BSS_AVG_ACCESS_DELAY = 63,
WLAN_EID_ANTENNA_INFO = 64,
WLAN_EID_RSNI = 65,
WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66,
WLAN_EID_BSS_AVAILABLE_CAPACITY = 67,
WLAN_EID_BSS_AC_ACCESS_DELAY = 68,
+ WLAN_EID_TIME_ADVERTISEMENT = 69,
WLAN_EID_RRM_ENABLED_CAPABILITIES = 70,
WLAN_EID_MULTIPLE_BSSID = 71,
WLAN_EID_BSS_COEX_2040 = 72,
WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74,
- WLAN_EID_EXT_CAPABILITY = 127,
-
- WLAN_EID_MOBILITY_DOMAIN = 54,
- WLAN_EID_FAST_BSS_TRANSITION = 55,
- WLAN_EID_TIMEOUT_INTERVAL = 56,
- WLAN_EID_RIC_DATA = 57,
WLAN_EID_RIC_DESCRIPTOR = 75,
-
- WLAN_EID_DSE_REGISTERED_LOCATION = 58,
- WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
- WLAN_EID_EXT_CHANSWITCH_ANN = 60,
-
- WLAN_EID_VHT_CAPABILITY = 191,
- WLAN_EID_VHT_OPERATION = 192,
- WLAN_EID_OPMODE_NOTIF = 199,
- WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
- WLAN_EID_EXTENDED_BSS_LOAD = 193,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
- WLAN_EID_AID = 197,
- WLAN_EID_QUIET_CHANNEL = 198,
-
- /* 802.11ad */
+ WLAN_EID_MMIE = 76,
+ WLAN_EID_ASSOC_COMEBACK_TIME = 77,
+ WLAN_EID_EVENT_REQUEST = 78,
+ WLAN_EID_EVENT_REPORT = 79,
+ WLAN_EID_DIAGNOSTIC_REQUEST = 80,
+ WLAN_EID_DIAGNOSTIC_REPORT = 81,
+ WLAN_EID_LOCATION_PARAMS = 82,
WLAN_EID_NON_TX_BSSID_CAP = 83,
+ WLAN_EID_SSID_LIST = 84,
+ WLAN_EID_MULTI_BSSID_IDX = 85,
+ WLAN_EID_FMS_DESCRIPTOR = 86,
+ WLAN_EID_FMS_REQUEST = 87,
+ WLAN_EID_FMS_RESPONSE = 88,
+ WLAN_EID_QOS_TRAFFIC_CAPA = 89,
+ WLAN_EID_BSS_MAX_IDLE_PERIOD = 90,
+ WLAN_EID_TSF_REQUEST = 91,
+ WLAN_EID_TSF_RESPOSNE = 92,
+ WLAN_EID_WNM_SLEEP_MODE = 93,
+ WLAN_EID_TIM_BCAST_REQ = 94,
+ WLAN_EID_TIM_BCAST_RESP = 95,
+ WLAN_EID_COLL_IF_REPORT = 96,
+ WLAN_EID_CHANNEL_USAGE = 97,
+ WLAN_EID_TIME_ZONE = 98,
+ WLAN_EID_DMS_REQUEST = 99,
+ WLAN_EID_DMS_RESPONSE = 100,
+ WLAN_EID_LINK_ID = 101,
+ WLAN_EID_WAKEUP_SCHEDUL = 102,
+ /* 103 reserved */
+ WLAN_EID_CHAN_SWITCH_TIMING = 104,
+ WLAN_EID_PTI_CONTROL = 105,
+ WLAN_EID_PU_BUFFER_STATUS = 106,
+ WLAN_EID_INTERWORKING = 107,
+ WLAN_EID_ADVERTISEMENT_PROTOCOL = 108,
+ WLAN_EID_EXPEDITED_BW_REQ = 109,
+ WLAN_EID_QOS_MAP_SET = 110,
+ WLAN_EID_ROAMING_CONSORTIUM = 111,
+ WLAN_EID_EMERGENCY_ALERT = 112,
+ WLAN_EID_MESH_CONFIG = 113,
+ WLAN_EID_MESH_ID = 114,
+ WLAN_EID_LINK_METRIC_REPORT = 115,
+ WLAN_EID_CONGESTION_NOTIFICATION = 116,
+ WLAN_EID_PEER_MGMT = 117,
+ WLAN_EID_CHAN_SWITCH_PARAM = 118,
+ WLAN_EID_MESH_AWAKE_WINDOW = 119,
+ WLAN_EID_BEACON_TIMING = 120,
+ WLAN_EID_MCCAOP_SETUP_REQ = 121,
+ WLAN_EID_MCCAOP_SETUP_RESP = 122,
+ WLAN_EID_MCCAOP_ADVERT = 123,
+ WLAN_EID_MCCAOP_TEARDOWN = 124,
+ WLAN_EID_GANN = 125,
+ WLAN_EID_RANN = 126,
+ WLAN_EID_EXT_CAPABILITY = 127,
+ /* 128, 129 reserved for Agere */
+ WLAN_EID_PREQ = 130,
+ WLAN_EID_PREP = 131,
+ WLAN_EID_PERR = 132,
+ /* 133-136 reserved for Cisco */
+ WLAN_EID_PXU = 137,
+ WLAN_EID_PXUC = 138,
+ WLAN_EID_AUTH_MESH_PEER_EXCH = 139,
+ WLAN_EID_MIC = 140,
+ WLAN_EID_DESTINATION_URI = 141,
+ WLAN_EID_UAPSD_COEX = 142,
WLAN_EID_WAKEUP_SCHEDULE = 143,
WLAN_EID_EXT_SCHEDULE = 144,
WLAN_EID_STA_AVAILABILITY = 145,
WLAN_EID_DMG_TSPEC = 146,
WLAN_EID_DMG_AT = 147,
WLAN_EID_DMG_CAP = 148,
+ /* 149-150 reserved for Cisco */
WLAN_EID_DMG_OPERATION = 151,
WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
WLAN_EID_DMG_BEAM_REFINEMENT = 153,
WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154,
+ /* 155-156 reserved for Cisco */
WLAN_EID_AWAKE_WINDOW = 157,
WLAN_EID_MULTI_BAND = 158,
WLAN_EID_ADDBA_EXT = 159,
@@ -1767,11 +1800,34 @@ enum ieee80211_eid {
WLAN_EID_MULTIPLE_MAC_ADDR = 170,
WLAN_EID_U_PID = 171,
WLAN_EID_DMG_LINK_ADAPT_ACK = 172,
+ /* 173 reserved for Symbol */
+ WLAN_EID_MCCAOP_ADV_OVERVIEW = 174,
WLAN_EID_QUIET_PERIOD_REQ = 175,
+ /* 176 reserved for Symbol */
WLAN_EID_QUIET_PERIOD_RESP = 177,
+ /* 178-179 reserved for Symbol */
+ /* 180 reserved for ISO/IEC 20011 */
WLAN_EID_EPAC_POLICY = 182,
WLAN_EID_CLISTER_TIME_OFF = 183,
+ WLAN_EID_INTER_AC_PRIO = 184,
+ WLAN_EID_SCS_DESCRIPTOR = 185,
+ WLAN_EID_QLOAD_REPORT = 186,
+ WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187,
+ WLAN_EID_HL_STREAM_ID = 188,
+ WLAN_EID_GCR_GROUP_ADDR = 189,
WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190,
+ WLAN_EID_VHT_CAPABILITY = 191,
+ WLAN_EID_VHT_OPERATION = 192,
+ WLAN_EID_EXTENDED_BSS_LOAD = 193,
+ WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
+ WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
+ WLAN_EID_AID = 197,
+ WLAN_EID_QUIET_CHANNEL = 198,
+ WLAN_EID_OPMODE_NOTIF = 199,
+
+ WLAN_EID_VENDOR_SPECIFIC = 221,
+ WLAN_EID_QOS_PARAMETER = 222,
};
/* Action category code */
@@ -2192,10 +2248,10 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
}
/**
- * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
+ * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
*/
-static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
+static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
{
if (ieee80211_is_disassoc(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control))
@@ -2224,6 +2280,17 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
+ * @skb: the skb containing the frame, length will be checked
+ */
+static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
+{
+ if (skb->len < 25)
+ return false;
+ return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
+}
+
+/**
* ieee80211_is_public_action - check if frame is a public action frame
* @hdr: the frame
* @len: length of the frame
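
With this change ieee80211_is_robust_mgmt_frame() takes an skb and performs its own length check, while the old header-based logic survives as _ieee80211_is_robust_mgmt_frame(). The implied call-site conversion, roughly:

	/* before: caller passed a header pointer and had to validate the
	 * frame length itself */
	if (ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)skb->data))
		handle_robust_frame(skb);		/* hypothetical */

	/* after: pass the skb; frames shorter than 25 bytes are rejected.
	 * Callers that genuinely only have a header keep using the
	 * underscore-prefixed variant. */
	if (ieee80211_is_robust_mgmt_frame(skb))
		handle_robust_frame(skb);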
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0053adde0ed9..a2678d35b5a2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -158,6 +158,11 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
devname, dev_id);
}
+extern int __must_check
+devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id);
+
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
/*
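
devm_request_any_context_irq() combines request_any_context_irq() with devres, so the IRQ is released automatically when the device is unbound. A minimal probe-time sketch; the handler and private data are hypothetical:

	ret = devm_request_any_context_irq(&pdev->dev, irq, mydrv_irq_handler,
					   IRQF_TRIGGER_FALLING,
					   dev_name(&pdev->dev), priv);
	if (ret < 0)
		return ret;
	/* as with request_any_context_irq(), a non-negative return value
	 * indicates whether the handler runs in hardirq or threaded context */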
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 554548cd3dd4..130bc8d77fa5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -38,8 +38,10 @@
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
+
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -227,6 +229,7 @@ struct mlx5_uuar_info {
* protect uuar allocation data structs
*/
struct mutex lock;
+ u32 ver;
};
struct mlx5_bf {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 440a02ee6f92..5e4756553c18 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -752,6 +752,9 @@ struct netdev_phys_port_id {
unsigned char id_len;
};
+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+ struct sk_buff *skb);
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@ struct netdev_phys_port_id {
* Required can not be NULL.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * void *accel_priv);
+ * void *accel_priv, select_queue_fallback_t fallback);
* Called to decide which queue to when device supports multiple
* transmit queues.
*
@@ -1005,7 +1008,8 @@ struct net_device_ops {
struct net_device *dev);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv,
+ select_queue_fallback_t fallback);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1312,13 +1316,7 @@ struct net_device {
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
- unsigned long last_rx; /* Time of last Rx
- * This should not be set in
- * drivers, unless really needed,
- * because network stack (bonding)
- * use it if/when necessary, to
- * avoid dirtying this cache line.
- */
+ unsigned long last_rx; /* Time of last Rx */
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr; /* hw address, (before bcast
@@ -1551,7 +1549,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
/*
* Net namespace inlines
@@ -1726,6 +1723,20 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
};
+#define netdev_alloc_pcpu_stats(type) \
+({ \
+ typeof(type) *pcpu_stats = alloc_percpu(type); \
+ if (pcpu_stats) { \
+ int i; \
+ for_each_possible_cpu(i) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, i); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
+})
+
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
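
A sketch of the intended use of the netdev_alloc_pcpu_stats() macro added above (foo_init is hypothetical): it replaces the open-coded alloc_percpu() plus per-CPU u64_stats_init() loop that several drivers carry.

/* Hypothetical driver init; struct pcpu_sw_netstats is the per-CPU
 * counter block defined just above the macro.  Whether the result is
 * stored in dev->tstats or private data is up to the driver. */
static int foo_init(struct net_device *dev)
{
	struct pcpu_sw_netstats __percpu *stats;

	stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!stats)
		return -ENOMEM;

	dev->tstats = stats;
	return 0;
}
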
@@ -2276,6 +2287,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
}
/**
+ * netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * @dev: network device
+ * @queue_index: given tx queue index
+ *
+ * Returns 0 if given tx queue index >= number of device tx queues,
+ * otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+ if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+ net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+ dev->name, queue_index,
+ dev->real_num_tx_queues);
+ return 0;
+ }
+
+ return queue_index;
+}
+
+/**
* netif_running - test if up
* @dev: network device
*
@@ -3068,7 +3099,12 @@ void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-netdev_features_t netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+ const struct net_device *dev);
+static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
+{
+ return netif_skb_dev_features(skb, skb->dev);
+}
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
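
Putting the two netdevice.h changes together, the new select_queue_fallback_t parameter and netdev_cap_txqueue(), a driver callback under the new signature might look like this sketch (foo_select_queue and the priority rule are hypothetical):

/* Hypothetical .ndo_select_queue with the new fourth argument; the
 * fallback replaces direct calls to the now-removed __netdev_pick_tx(). */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv,
			    select_queue_fallback_t fallback)
{
	/* Pin control traffic to the last queue, clamped in case
	 * real_num_tx_queues shrank underneath us. */
	if (skb->priority == TC_PRIO_CONTROL)
		return netdev_cap_txqueue(dev, dev->real_num_tx_queues - 1);

	return fallback(dev, skb);
}
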
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index fd4f2d1cdf6c..e110b8c266f5 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -70,6 +70,16 @@ enum {
IEEE802154_ATTR_PHY_NAME,
IEEE802154_ATTR_DEV_TYPE,
+ IEEE802154_ATTR_TXPOWER,
+ IEEE802154_ATTR_LBT_ENABLED,
+ IEEE802154_ATTR_CCA_MODE,
+ IEEE802154_ATTR_CCA_ED_LEVEL,
+ IEEE802154_ATTR_CSMA_RETRIES,
+ IEEE802154_ATTR_CSMA_MIN_BE,
+ IEEE802154_ATTR_CSMA_MAX_BE,
+
+ IEEE802154_ATTR_FRAME_RETRIES,
+
__IEEE802154_ATTR_MAX,
};
@@ -122,6 +132,8 @@ enum {
IEEE802154_ADD_IFACE,
IEEE802154_DEL_IFACE,
+ IEEE802154_SET_PHYPARAMS,
+
__IEEE802154_CMD_MAX,
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 70c64ba17fa5..435cb995904d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -169,35 +169,15 @@ static inline const char *of_node_full_name(const struct device_node *np)
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
-#define for_each_node_by_name(dn, name) \
- for (dn = of_find_node_by_name(NULL, name); dn; \
- dn = of_find_node_by_name(dn, name))
extern struct device_node *of_find_node_by_type(struct device_node *from,
const char *type);
-#define for_each_node_by_type(dn, type) \
- for (dn = of_find_node_by_type(NULL, type); dn; \
- dn = of_find_node_by_type(dn, type))
extern struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compat);
-#define for_each_compatible_node(dn, type, compatible) \
- for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
- dn = of_find_compatible_node(dn, type, compatible))
extern struct device_node *of_find_matching_node_and_match(
struct device_node *from,
const struct of_device_id *matches,
const struct of_device_id **match);
-static inline struct device_node *of_find_matching_node(
- struct device_node *from,
- const struct of_device_id *matches)
-{
- return of_find_matching_node_and_match(from, matches, NULL);
-}
-#define for_each_matching_node(dn, matches) \
- for (dn = of_find_matching_node(NULL, matches); dn; \
- dn = of_find_matching_node(dn, matches))
-#define for_each_matching_node_and_match(dn, matches, match) \
- for (dn = of_find_matching_node_and_match(NULL, matches, match); \
- dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
extern struct device_node *of_find_node_by_path(const char *path);
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_get_parent(const struct device_node *node);
@@ -209,43 +189,11 @@ extern struct device_node *of_get_next_available_child(
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
-#define for_each_child_of_node(parent, child) \
- for (child = of_get_next_child(parent, NULL); child != NULL; \
- child = of_get_next_child(parent, child))
-
-#define for_each_available_child_of_node(parent, child) \
- for (child = of_get_next_available_child(parent, NULL); child != NULL; \
- child = of_get_next_available_child(parent, child))
-
-static inline int of_get_child_count(const struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_child_of_node(np, child)
- num++;
-
- return num;
-}
-
-static inline int of_get_available_child_count(const struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- num++;
-
- return num;
-}
/* cache lookup */
extern struct device_node *of_find_next_cache_node(const struct device_node *);
extern struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name);
-#define for_each_node_with_property(dn, prop_name) \
- for (dn = of_find_node_with_property(NULL, prop_name); dn; \
- dn = of_find_node_with_property(dn, prop_name))
extern struct property *of_find_property(const struct device_node *np,
const char *name,
@@ -367,42 +315,53 @@ static inline struct device_node *of_find_node_by_name(struct device_node *from,
return NULL;
}
-static inline struct device_node *of_get_parent(const struct device_node *node)
+static inline struct device_node *of_find_node_by_type(struct device_node *from,
+ const char *type)
{
return NULL;
}
-static inline bool of_have_populated_dt(void)
+static inline struct device_node *of_find_matching_node_and_match(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ const struct of_device_id **match)
{
- return false;
+ return NULL;
}
-/* Kill an unused variable warning on a device_node pointer */
-static inline void __of_use_dn(const struct device_node *np)
+static inline struct device_node *of_get_parent(const struct device_node *node)
{
+ return NULL;
}
-#define for_each_child_of_node(parent, child) \
- while (__of_use_dn(parent), __of_use_dn(child), 0)
+static inline struct device_node *of_get_next_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
-#define for_each_available_child_of_node(parent, child) \
- while (0)
+static inline struct device_node *of_get_next_available_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
-static inline struct device_node *of_get_child_by_name(
- const struct device_node *node,
- const char *name)
+static inline struct device_node *of_find_node_with_property(
+ struct device_node *from, const char *prop_name)
{
return NULL;
}
-static inline int of_get_child_count(const struct device_node *np)
+static inline bool of_have_populated_dt(void)
{
- return 0;
+ return false;
}
-static inline int of_get_available_child_count(const struct device_node *np)
+static inline struct device_node *of_get_child_by_name(
+ const struct device_node *node,
+ const char *name)
{
- return 0;
+ return NULL;
}
static inline int of_device_is_compatible(const struct device_node *device,
@@ -569,6 +528,13 @@ extern int of_node_to_nid(struct device_node *np);
static inline int of_node_to_nid(struct device_node *device) { return 0; }
#endif
+static inline struct device_node *of_find_matching_node(
+ struct device_node *from,
+ const struct of_device_id *matches)
+{
+ return of_find_matching_node_and_match(from, matches, NULL);
+}
+
/**
 * of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
@@ -618,6 +584,55 @@ static inline int of_property_read_u32(const struct device_node *np,
s; \
s = of_prop_next_string(prop, s))
+#define for_each_node_by_name(dn, name) \
+ for (dn = of_find_node_by_name(NULL, name); dn; \
+ dn = of_find_node_by_name(dn, name))
+#define for_each_node_by_type(dn, type) \
+ for (dn = of_find_node_by_type(NULL, type); dn; \
+ dn = of_find_node_by_type(dn, type))
+#define for_each_compatible_node(dn, type, compatible) \
+ for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+ dn = of_find_compatible_node(dn, type, compatible))
+#define for_each_matching_node(dn, matches) \
+ for (dn = of_find_matching_node(NULL, matches); dn; \
+ dn = of_find_matching_node(dn, matches))
+#define for_each_matching_node_and_match(dn, matches, match) \
+ for (dn = of_find_matching_node_and_match(NULL, matches, match); \
+ dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
+#define for_each_child_of_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+#define for_each_available_child_of_node(parent, child) \
+ for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+ child = of_get_next_available_child(parent, child))
+
+#define for_each_node_with_property(dn, prop_name) \
+ for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+ dn = of_find_node_with_property(dn, prop_name))
+
+static inline int of_get_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
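
The reshuffle above moves the iterator macros and child-count helpers below both the CONFIG_OF and !CONFIG_OF stub declarations, so a consumer like the sketch below (foo_count_enabled_ports and the compatible string are hypothetical) compiles unchanged either way and simply sees zero children when OF is disabled.

/* Hypothetical consumer of the relocated helpers. */
static int foo_count_enabled_ports(struct device_node *np)
{
	struct device_node *child;
	int n = 0;

	for_each_available_child_of_node(np, child)
		if (of_device_is_compatible(child, "vendor,foo-port"))
			n++;

	return n;
}
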
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8d7dd6768cb7..ef370210ffb2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -78,11 +78,13 @@ static inline int of_device_uevent_modalias(struct device *dev,
static inline void of_device_node_put(struct device *dev) { }
-static inline const struct of_device_id *of_match_device(
+static inline const struct of_device_id *__of_match_device(
const struct of_device_id *matches, const struct device *dev)
{
return NULL;
}
+#define of_match_device(matches, dev) \
+ __of_match_device(of_match_ptr(matches), (dev))
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 565188ca328f..24126c4b27b5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -74,8 +74,53 @@ typedef enum {
PHY_INTERFACE_MODE_RTBI,
PHY_INTERFACE_MODE_SMII,
PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
+/**
+ * phy_modes - map 'enum phy_interface_t' found in include/linux/phy.h
+ * onto the device tree binding of 'phy-mode', so that an Ethernet
+ * device driver can get the PHY interface mode from the device tree.
+ */
+static inline const char *phy_modes(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_NA:
+ return "";
+ case PHY_INTERFACE_MODE_MII:
+ return "mii";
+ case PHY_INTERFACE_MODE_GMII:
+ return "gmii";
+ case PHY_INTERFACE_MODE_SGMII:
+ return "sgmii";
+ case PHY_INTERFACE_MODE_TBI:
+ return "tbi";
+ case PHY_INTERFACE_MODE_REVMII:
+ return "rev-mii";
+ case PHY_INTERFACE_MODE_RMII:
+ return "rmii";
+ case PHY_INTERFACE_MODE_RGMII:
+ return "rgmii";
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ return "rgmii-id";
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ return "rgmii-rxid";
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return "rgmii-txid";
+ case PHY_INTERFACE_MODE_RTBI:
+ return "rtbi";
+ case PHY_INTERFACE_MODE_SMII:
+ return "smii";
+ case PHY_INTERFACE_MODE_XGMII:
+ return "xgmii";
+ case PHY_INTERFACE_MODE_MOCA:
+ return "moca";
+ default:
+ return "unknown";
+ }
+}
+
#define PHY_INIT_TIMEOUT 100000
#define PHY_STATE_TIME 1
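
A sketch of the kind of caller phy_modes() above is meant for (foo_report_phy_mode and its message are illustrative): translating the enum obtained from of_get_phy_mode() back into the device-tree 'phy-mode' string for logging or matching.

/* Illustrative only; assumes <linux/of_net.h> for of_get_phy_mode(). */
static void foo_report_phy_mode(struct platform_device *pdev)
{
	int iface = of_get_phy_mode(pdev->dev.of_node);

	if (iface < 0)
		iface = PHY_INTERFACE_MODE_NA;

	dev_info(&pdev->dev, "phy-mode: %s\n", phy_modes(iface));
}
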
@@ -308,6 +353,7 @@ struct phy_device {
struct phy_c45_device_ids c45_ids;
bool is_c45;
bool is_internal;
+ bool has_fixups;
enum phy_state state;
@@ -394,6 +440,11 @@ struct phy_driver {
u32 flags;
/*
+ * Called to issue a PHY software reset
+ */
+ int (*soft_reset)(struct phy_device *phydev);
+
+ /*
* Called to initialize the PHY,
* including after a reset
*/
@@ -417,6 +468,9 @@ struct phy_driver {
*/
int (*config_aneg)(struct phy_device *phydev);
+ /* Determines the auto negotiation result */
+ int (*aneg_done)(struct phy_device *phydev);
+
/* Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
@@ -612,10 +666,12 @@ static inline int phy_read_status(struct phy_device *phydev)
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
+int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
+int genphy_soft_reset(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e273e5ac19c9..3f83459dbb20 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -146,7 +146,9 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
phy->attrs.bus_width = bus_width;
}
struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
void phy_put(struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_simple_xlate(struct device *dev,
@@ -232,11 +234,23 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *phy_optional_get(struct device *dev,
+ const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline struct phy *devm_phy_get(struct device *dev, const char *string)
{
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_phy_optional_get(struct device *dev,
+ const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void phy_put(struct phy *phy)
{
}
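
A sketch of the intended use of the new *_optional_get() variants (foo_probe and the "usb" phandle name are hypothetical): assuming they return NULL rather than an error when the PHY is simply absent, only a genuine lookup failure aborts the probe.

/* Hypothetical probe fragment; a missing PHY is tolerated. */
static int foo_probe(struct platform_device *pdev)
{
	struct phy *phy = devm_phy_optional_get(&pdev->dev, "usb");

	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* real error, e.g. -EPROBE_DEFER */

	if (phy)
		phy_init(phy);

	return 0;
}
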
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c574bf3bd6f6..11b6925f0e96 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2918,5 +2918,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
return !skb->head_frag || skb_cloned(skb);
}
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+ unsigned int hdr_len = skb_transport_header(skb) -
+ skb_network_header(skb);
+ return hdr_len + skb_gso_transport_seglen(skb);
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
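
A sketch of the check that skb_gso_network_seglen() enables, mirroring what a forwarding path would do (foo_gso_exceeds_mtu is hypothetical): decide whether the individual segments of a GSO skb would still fit a given MTU.

/* Hypothetical MTU check for GSO packets. */
static bool foo_gso_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_is_gso(skb) && skb_gso_network_seglen(skb) > mtu;
}
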
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a1d4ca290862..4203c66d8803 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -273,7 +273,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* message while queuing transfers that arrive in the meantime. When the
* driver is finished with this message, it must call
* spi_finalize_current_message() so the subsystem can issue the next
- * transfer
+ * message
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
@@ -287,7 +287,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer
+ * can issue the next transfer. Note: transfer_one and
+ * transfer_one_message are mutually exclusive; when both
+ * are set, the generic subsystem does not call your
+ * transfer_one callback.
* @unprepare_message: undo any work done by prepare_message().
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
diff --git a/include/linux/usb.h b/include/linux/usb.h
index c716da18c668..7f6eb859873e 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1265,8 +1265,6 @@ typedef void (*usb_complete_t)(struct urb *);
* @sg: scatter gather buffer list, the buffer size of each element in
* the list (except the last) must be divisible by the endpoint's
* max packet size if no_sg_constraint isn't set in 'struct usb_bus'
- * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
- * Do not use urb->sg for interrupt endpoints for now, only bulk.)
* @num_mapped_sgs: (internal) number of mapped sg entries
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 788d8378e587..3ee4c92afd1b 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -89,7 +89,7 @@ struct tc_action_ops {
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
- int (*cleanup)(struct tc_action *, int bind);
+ void (*cleanup)(struct tc_action *, int bind);
int (*lookup)(struct tc_action *, u32);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action *act, int ovr,
@@ -98,20 +98,18 @@ struct tc_action_ops {
};
int tcf_hash_search(struct tc_action *a, u32 index);
-void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-int tcf_hash_release(struct tcf_common *p, int bind,
- struct tcf_hashinfo *hinfo);
+void tcf_hash_destroy(struct tc_action *a);
+int tcf_hash_release(struct tc_action *a, int bind);
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
-struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
- int bind);
-struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size,
- int bind);
-void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+int tcf_hash_check(u32 index, struct tc_action *a, int bind);
+int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+ int size, int bind);
+void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
+void tcf_hash_insert(struct tc_action *a);
-int tcf_register_action(struct tc_action_ops *a);
+int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
int tcf_unregister_action(struct tc_action_ops *a);
-void tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res);
int tcf_action_init(struct net *net, struct nlattr *nla,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b1f84b05c67e..9f90554e88c4 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1394,10 +1394,12 @@ struct cfg80211_scan_request {
/**
* struct cfg80211_match_set - sets of attributes to match
*
- * @ssid: SSID to be matched
+ * @ssid: SSID to be matched; may be zero-length for no match (RSSI only)
+ * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
+ s32 rssi_thold;
};
/**
@@ -1420,7 +1422,8 @@ struct cfg80211_match_set {
* @dev: the interface
* @scan_start: start time of the scheduled scan
* @channels: channels to scan
- * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
+ * @min_rssi_thold: for drivers only supporting a single threshold, this
+ * contains the minimum over all matchsets
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1433,7 +1436,7 @@ struct cfg80211_sched_scan_request {
u32 flags;
struct cfg80211_match_set *match_sets;
int n_match_sets;
- s32 rssi_thold;
+ s32 min_rssi_thold;
/* internal */
struct wiphy *wiphy;
@@ -1701,8 +1704,14 @@ struct cfg80211_ibss_params {
*
* @channel: The channel to use or %NULL if not specified (auto-select based
* on scan results)
+ * @channel_hint: The channel of the recommended BSS for initial connection or
+ * %NULL if not specified
* @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
* results)
+ * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
+ * %NULL if not specified. Unlike the @bssid parameter, the driver is
+ * allowed to ignore this @bssid_hint if it has knowledge of a better BSS
+ * to use.
* @ssid: SSID
* @ssid_len: Length of ssid in octets
* @auth_type: Authentication type (algorithm)
@@ -1725,11 +1734,13 @@ struct cfg80211_ibss_params {
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
- u8 *bssid;
- u8 *ssid;
+ struct ieee80211_channel *channel_hint;
+ const u8 *bssid;
+ const u8 *bssid_hint;
+ const u8 *ssid;
size_t ssid_len;
enum nl80211_auth_type auth_type;
- u8 *ie;
+ const u8 *ie;
size_t ie_len;
bool privacy;
enum nl80211_mfp mfp;
@@ -1768,6 +1779,7 @@ struct cfg80211_bitrate_mask {
u32 legacy;
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ enum nl80211_txrate_gi gi;
} control[IEEE80211_NUM_BANDS];
};
/**
@@ -2875,6 +2887,11 @@ struct wiphy_vendor_command {
* @n_vendor_commands: number of vendor commands
* @vendor_events: array of vendor events supported by the hardware
* @n_vendor_events: number of vendor events
+ *
+ * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode
+ * (including P2P GO) or 0 to indicate no such limit is advertised. The
+ * driver is allowed to advertise a theoretical limit that it can reach in
+ * some cases, but may not always reach.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -2990,6 +3007,8 @@ struct wiphy {
const struct nl80211_vendor_cmd_info *vendor_events;
int n_vendor_commands, n_vendor_events;
+ u16 max_ap_assoc_sta;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -3127,8 +3146,8 @@ struct cfg80211_cached_keys;
* @identifier: (private) Identifier used in nl80211 to identify this
* wireless device if it has no netdev
* @current_bss: (private) Used by the internal configuration code
- * @channel: (private) Used by the internal configuration code to track
- * the user-set AP, monitor and WDS channel
+ * @chandef: (private) Used by the internal configuration code to track
+ * the user-set channel definition.
* @preset_chandef: (private) Used by the internal configuration code to
* track the channel to be used for AP later
* @bssid: (private) Used by the internal configuration code
@@ -3192,9 +3211,7 @@ struct wireless_dev {
struct cfg80211_internal_bss *current_bss; /* associated / joined */
struct cfg80211_chan_def preset_chandef;
-
- /* for AP and mesh channel tracking */
- struct ieee80211_channel *channel;
+ struct cfg80211_chan_def chandef;
bool ibss_fixed;
bool ibss_dfs_possible;
@@ -3876,6 +3893,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
*
* @dev: network device
* @bssid: the BSSID of the IBSS joined
+ * @channel: the channel of the IBSS joined
* @gfp: allocation flags
*
* This function notifies cfg80211 that the device joined an IBSS or
@@ -3885,7 +3903,8 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
* with the locally generated beacon -- this guarantees that there is
* always a scan result for this IBSS. cfg80211 will handle the rest.
*/
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel, gfp_t gfp);
/**
* cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 8b5b71433297..b0fd9476c538 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -316,6 +316,10 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM 0x10
#define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED 0x20
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER1 0x02
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
diff --git a/include/net/ip.h b/include/net/ip.h
index 23be0fd37937..4aa781b7f609 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -489,7 +489,8 @@ int ip_options_rcv_srr(struct sk_buff *skb);
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc);
+int ip_cmsg_send(struct net *net, struct msghdr *msg,
+ struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index f4ab2fb4d50c..4f0f29dce0aa 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -808,9 +808,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
* @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
* @RX_FLAG_40MHZ: HT40 (40 MHz) was used
- * @RX_FLAG_80MHZ: 80 MHz was used
- * @RX_FLAG_80P80MHZ: 80+80 MHz was used
- * @RX_FLAG_160MHZ: 160 MHz was used
* @RX_FLAG_SHORT_GI: Short guard interval was used
* @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
* Valid only for data frames (mainly A-MPDU)
@@ -830,6 +827,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* on this subframe
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_LDPC: LDPC was used
* @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
* @RX_FLAG_10MHZ: 10 MHz (half channel) was used
* @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -866,9 +864,7 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20),
RX_FLAG_MACTIME_END = BIT(21),
RX_FLAG_VHT = BIT(22),
- RX_FLAG_80MHZ = BIT(23),
- RX_FLAG_80P80MHZ = BIT(24),
- RX_FLAG_160MHZ = BIT(25),
+ RX_FLAG_LDPC = BIT(23),
RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
RX_FLAG_10MHZ = BIT(28),
RX_FLAG_5MHZ = BIT(29),
@@ -878,6 +874,21 @@ enum mac80211_rx_flags {
#define RX_FLAG_STBC_SHIFT 26
/**
+ * enum mac80211_rx_vht_flags - receive VHT flags
+ *
+ * These flags are used with the @vht_flag member of
+ * &struct ieee80211_rx_status.
+ * @RX_VHT_FLAG_80MHZ: 80 MHz was used
+ * @RX_VHT_FLAG_80P80MHZ: 80+80 MHz was used
+ * @RX_VHT_FLAG_160MHZ: 160 MHz was used
+ */
+enum mac80211_rx_vht_flags {
+ RX_VHT_FLAG_80MHZ = BIT(0),
+ RX_VHT_FLAG_80P80MHZ = BIT(1),
+ RX_VHT_FLAG_160MHZ = BIT(2),
+};
+
+/**
* struct ieee80211_rx_status - receive status
*
* The low-level driver should provide this information (the subset
@@ -902,26 +913,19 @@ enum mac80211_rx_flags {
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
* @vht_nss: number of streams (VHT only)
* @flag: %RX_FLAG_*
+ * @vht_flag: %RX_VHT_FLAG_*
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
* @ampdu_delimiter_crc: A-MPDU delimiter CRC
- * @vendor_radiotap_bitmap: radiotap vendor namespace presence bitmap
- * @vendor_radiotap_len: radiotap vendor namespace length
- * @vendor_radiotap_align: radiotap vendor namespace alignment. Note
- * that the actual data must be at the start of the SKB data
- * already.
- * @vendor_radiotap_oui: radiotap vendor namespace OUI
- * @vendor_radiotap_subns: radiotap vendor sub namespace
*/
struct ieee80211_rx_status {
u64 mactime;
u32 device_timestamp;
u32 ampdu_reference;
u32 flag;
- u32 vendor_radiotap_bitmap;
- u16 vendor_radiotap_len;
u16 freq;
+ u8 vht_flag;
u8 rate_idx;
u8 vht_nss;
u8 rx_flags;
@@ -931,9 +935,6 @@ struct ieee80211_rx_status {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 ampdu_delimiter_crc;
- u8 vendor_radiotap_align;
- u8 vendor_radiotap_oui[3];
- u8 vendor_radiotap_subns;
};
/**
@@ -2750,11 +2751,13 @@ enum ieee80211_roc_type {
* @channel_switch_beacon: Starts a channel switch to a new channel.
* Beacons are modified to include CSA or ECSA IEs before calling this
* function. The corresponding count fields in these IEs must be
- * decremented, and when they reach zero the driver must call
+ * decremented, and when they reach 1 the driver must call
* ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
* get the csa counter decremented by mac80211, but must check if it is
- * zero using ieee80211_csa_is_complete() after the beacon has been
+ * 1 using ieee80211_csa_is_complete() after the beacon has been
* transmitted and then call ieee80211_csa_finish().
+ * If the CSA count starts as zero or 1, this function will not be called,
+ * since there won't be any time to beacon before the switch anyway.
*
* @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
* information in bss_conf is set up and the beacon can be retrieved. A
@@ -3452,13 +3455,13 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* After a channel switch announcement was scheduled and the counter in this
- * announcement hit zero, this function must be called by the driver to
+ * announcement hits 1, this function must be called by the driver to
* notify mac80211 that the channel can be changed.
*/
void ieee80211_csa_finish(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_is_complete - find out if counters reached zero
+ * ieee80211_csa_is_complete - find out if counters reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
 * This function returns whether the channel switch counters reached 1.
@@ -4451,7 +4454,6 @@ struct ieee80211_tx_rate_control {
};
struct rate_control_ops {
- struct module *module;
const char *name;
void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
void (*free)(void *priv);
@@ -4553,8 +4555,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta,
struct ieee80211_sta_rates *rates);
-int ieee80211_rate_control_register(struct rate_control_ops *ops);
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
+int ieee80211_rate_control_register(const struct rate_control_ops *ops);
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops);
static inline bool
conf_is_ht20(struct ieee80211_conf *conf)
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 807d6b7a943f..8ca3d04e7558 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -113,6 +113,32 @@ struct ieee802154_dev {
* Set radio for listening on specific address.
* Set the device for listening on specified address.
* Returns either zero, or negative errno.
+ *
+ * set_txpower:
+ * Set radio transmit power in dB. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_lbt
+ * Enables or disables listen before talk on the device. Called with
+ * pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_mode
+ * Sets the CCA mode used by the device. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_ed_level
+ * Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ * held.
+ * Returns either zero, or negative errno.
+ *
+ * set_csma_params
+ * Sets the CSMA parameter set for the PHY. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_frame_retries
+ * Sets the retransmission attempt limit. Called with pib_lock held.
+ * Returns either zero, or negative errno.
*/
struct ieee802154_ops {
struct module *owner;
@@ -129,6 +155,15 @@ struct ieee802154_ops {
unsigned long changed);
int (*ieee_addr)(struct ieee802154_dev *dev,
u8 addr[IEEE802154_ADDR_LEN]);
+ int (*set_txpower)(struct ieee802154_dev *dev, int db);
+ int (*set_lbt)(struct ieee802154_dev *dev, bool on);
+ int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode);
+ int (*set_cca_ed_level)(struct ieee802154_dev *dev,
+ s32 level);
+ int (*set_csma_params)(struct ieee802154_dev *dev,
+ u8 min_be, u8 max_be, u8 retries);
+ int (*set_frame_retries)(struct ieee802154_dev *dev,
+ s8 retries);
};
/* Basic interface to register ieee802154 device */
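
A sketch of a radio driver wiring up one of the new hooks documented above; foo_priv, foo_write_reg, FOO_REG_RETRIES and the valid range are assumptions modelled on typical 802.15.4 transceivers. As the comment block notes, pib_lock is held by the caller, so the handler only validates and programs the value.

/* Hypothetical set_frame_retries implementation; -1 conventionally
 * disables retransmissions, 0..7 follows macMaxFrameRetries. */
static int foo_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
{
	struct foo_priv *priv = dev->priv;

	if (retries < -1 || retries > 7)
		return -EINVAL;

	return foo_write_reg(priv, FOO_REG_RETRIES, retries);
}
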
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 661e45d38051..72240e5ac2c4 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -140,7 +140,7 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
struct nlattr *tb[]);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
-extern const struct nla_policy ifla_policy[IFLA_MAX+1];
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index d992ca3145fe..6ee76c804893 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,17 +1653,6 @@ struct sctp_association {
/* This is the last advertised value of rwnd over a SACK chunk. */
__u32 a_rwnd;
- /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
- * to slop over a maximum of the association's frag_point.
- */
- __u32 rwnd_over;
-
- /* Keeps treack of rwnd pressure. This happens when we have
- * a window, but not recevie buffer (i.e small packets). This one
- * is releases slowly (1 PMTU at a time ).
- */
- __u32 rwnd_press;
-
/* This is the sndbuf size in use for the association.
* This corresponds to the sndbuf size for the association,
* as specified in the sk->sndbuf.
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old,
__u32 sctp_association_get_next_tsn(struct sctp_association *);
void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_update(struct sctp_association *, bool);
void sctp_assoc_set_primary(struct sctp_association *,
struct sctp_transport *);
void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9e8710be7a04..fa8f5fac65e9 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -9,7 +9,7 @@ struct tcf_csum {
u32 update_flags;
};
-#define to_tcf_csum(pc) \
- container_of(pc,struct tcf_csum,common)
+#define to_tcf_csum(a) \
+ container_of(a->priv,struct tcf_csum,common)
#endif /* __NET_TC_CSUM_H */
diff --git a/include/net/tc_act/tc_defact.h b/include/net/tc_act/tc_defact.h
index 65f024b80958..9763dcbb9bc3 100644
--- a/include/net/tc_act/tc_defact.h
+++ b/include/net/tc_act/tc_defact.h
@@ -8,7 +8,7 @@ struct tcf_defact {
u32 tcfd_datalen;
void *tcfd_defdata;
};
-#define to_defact(pc) \
- container_of(pc, struct tcf_defact, common)
+#define to_defact(a) \
+ container_of(a->priv, struct tcf_defact, common)
#endif /* __NET_TC_DEF_H */
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9e3f6767b80e..9fc9b578908a 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -11,7 +11,7 @@ struct tcf_gact {
int tcfg_paction;
#endif
};
-#define to_gact(pc) \
- container_of(pc, struct tcf_gact, common)
+#define to_gact(a) \
+ container_of(a->priv, struct tcf_gact, common)
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
index f7d25dfcc4b7..c0f4193f432c 100644
--- a/include/net/tc_act/tc_ipt.h
+++ b/include/net/tc_act/tc_ipt.h
@@ -11,7 +11,7 @@ struct tcf_ipt {
char *tcfi_tname;
struct xt_entry_target *tcfi_t;
};
-#define to_ipt(pc) \
- container_of(pc, struct tcf_ipt, common)
+#define to_ipt(a) \
+ container_of(a->priv, struct tcf_ipt, common)
#endif /* __NET_TC_IPT_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index cfe2943690ff..4dd77a1c106b 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -11,7 +11,7 @@ struct tcf_mirred {
struct net_device *tcfm_dev;
struct list_head tcfm_list;
};
-#define to_mirred(pc) \
- container_of(pc, struct tcf_mirred, common)
+#define to_mirred(a) \
+ container_of(a->priv, struct tcf_mirred, common)
#endif /* __NET_TC_MIR_H */
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index 4a691f34d703..63d8e9ca9d99 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -13,9 +13,9 @@ struct tcf_nat {
u32 flags;
};
-static inline struct tcf_nat *to_tcf_nat(struct tcf_common *pc)
+static inline struct tcf_nat *to_tcf_nat(struct tc_action *a)
{
- return container_of(pc, struct tcf_nat, common);
+ return container_of(a->priv, struct tcf_nat, common);
}
#endif /* __NET_TC_NAT_H */
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index e6f6e15956f5..5b80998879c7 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -9,7 +9,7 @@ struct tcf_pedit {
unsigned char tcfp_flags;
struct tc_pedit_key *tcfp_keys;
};
-#define to_pedit(pc) \
- container_of(pc, struct tcf_pedit, common)
+#define to_pedit(a) \
+ container_of(a->priv, struct tcf_pedit, common)
#endif /* __NET_TC_PED_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index dd5d86fab030..0df9a0db4a8e 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -29,7 +29,7 @@ struct tcf_skbedit {
u16 queue_mapping;
/* XXX: 16-bit pad here? */
};
-#define to_skbedit(pc) \
- container_of(pc, struct tcf_skbedit, common)
+#define to_skbedit(a) \
+ container_of(a->priv, struct tcf_skbedit, common)
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 56fc366da6d5..1f820537741a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -791,8 +791,6 @@ struct tcp_congestion_ops {
/* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk);
- /* lower bound for congestion window (optional) */
- u32 (*min_cwnd)(const struct sock *sk);
/* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
/* call before changing ca_state (optional) */
@@ -827,7 +825,6 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
extern struct tcp_congestion_ops tcp_init_congestion_ops;
u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
-u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h
index b52bda8d13b1..10ab0fc6d4f7 100644
--- a/include/net/wpan-phy.h
+++ b/include/net/wpan-phy.h
@@ -37,15 +37,22 @@ struct wpan_phy {
struct mutex pib_lock;
/*
- * This is a PIB according to 802.15.4-2006.
+ * This is a PIB according to 802.15.4-2011.
* We do not provide timing-related variables, as they
* aren't used outside of driver
*/
u8 current_channel;
u8 current_page;
u32 channels_supported[32];
- u8 transmit_power;
+ s8 transmit_power;
u8 cca_mode;
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+ s32 cca_ed_level;
struct device dev;
int idx;
@@ -54,6 +61,14 @@ struct wpan_phy {
const char *name, int type);
void (*del_iface)(struct wpan_phy *phy, struct net_device *dev);
+ int (*set_txpower)(struct wpan_phy *phy, int db);
+ int (*set_lbt)(struct wpan_phy *phy, bool on);
+ int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode);
+ int (*set_cca_ed_level)(struct wpan_phy *phy, int level);
+ int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be,
+ u8 retries);
+ int (*set_frame_retries)(struct wpan_phy *phy, s8 retries);
+
char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
};
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8d4a1c06f7e4..6793f32ccb58 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -226,7 +226,8 @@ enum ib_port_cap_flags {
IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
IB_PORT_BOOT_MGMT_SUP = 1 << 23,
IB_PORT_LINK_LATENCY_SUP = 1 << 24,
- IB_PORT_CLIENT_REG_SUP = 1 << 25
+ IB_PORT_CLIENT_REG_SUP = 1 << 25,
+ IB_PORT_IP_BASED_GIDS = 1 << 26
};
enum ib_port_width {
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c9c791209cd1..1772fadcff62 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -525,7 +525,6 @@ struct se_cmd {
#define CMD_T_COMPLETE (1 << 2)
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
-#define CMD_T_FAILED (1 << 6)
#define CMD_T_DEV_ACTIVE (1 << 7)
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 9e9475c85de5..e5bf9a76f169 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -42,7 +42,6 @@ TRACE_EVENT(pstate_sample,
u32 state,
u64 mperf,
u64 aperf,
- u32 energy,
u32 freq
),
@@ -51,7 +50,6 @@ TRACE_EVENT(pstate_sample,
state,
mperf,
aperf,
- energy,
freq
),
@@ -61,7 +59,6 @@ TRACE_EVENT(pstate_sample,
__field(u32, state)
__field(u64, mperf)
__field(u64, aperf)
- __field(u32, energy)
__field(u32, freq)
),
@@ -72,17 +69,15 @@ TRACE_EVENT(pstate_sample,
__entry->state = state;
__entry->mperf = mperf;
__entry->aperf = aperf;
- __entry->energy = energy;
__entry->freq = freq;
),
- TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu energy=%lu freq=%lu ",
+ TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
(unsigned long)__entry->core_busy,
(unsigned long)__entry->scaled_busy,
(unsigned long)__entry->state,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
- (unsigned long)__entry->energy,
(unsigned long)__entry->freq
)
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 3c9a833992e8..b06c8ed68707 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -619,6 +619,8 @@ struct drm_gem_open {
#define DRM_PRIME_CAP_EXPORT 0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+#define DRM_CAP_CURSOR_WIDTH 0x8
+#define DRM_CAP_CURSOR_HEIGHT 0x9
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 9971c560ed9a..87792a5fee3b 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -87,6 +87,7 @@
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
+#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
/**
* struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 1b8a0f4c9590..b4d69092fbdb 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -558,7 +558,6 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code)
#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64)
#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
struct btrfs_ioctl_space_args)
-#define BTRFS_IOC_GLOBAL_RSV _IOR(BTRFS_IOCTL_MAGIC, 20, __u64)
#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64)
#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index e52958d7c2d1..5d9d1d140718 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -8,6 +8,38 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*/
#ifndef CAN_H
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 38dbafaa5341..fd161e91b6d7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -16,37 +16,97 @@
#include <linux/types.h>
#include <linux/if_ether.h>
-/* This should work for both 32 and 64 bit userland. */
+/* All structures exposed to userland should be defined such that they
+ * have the same layout for 32-bit and 64-bit userland.
+ */
+
+/**
+ * struct ethtool_cmd - link control and status
+ * @cmd: Command number = %ETHTOOL_GSET or %ETHTOOL_SSET
+ * @supported: Bitmask of %SUPPORTED_* flags for the link modes,
+ * physical connectors and other link features for which the
+ * interface supports autonegotiation or auto-detection.
+ * Read-only.
+ * @advertising: Bitmask of %ADVERTISED_* flags for the link modes,
+ * physical connectors and other link features that are
+ * advertised through autonegotiation or enabled for
+ * auto-detection.
+ * @speed: Low bits of the speed
+ * @duplex: Duplex mode; one of %DUPLEX_*
+ * @port: Physical connector type; one of %PORT_*
+ * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not
+ * applicable. For clause 45 PHYs this is the PRTAD.
+ * @transceiver: Historically used to distinguish different possible
+ * PHY types, but not in a consistent way. Deprecated.
+ * @autoneg: Enable/disable autonegotiation and auto-detection;
+ * either %AUTONEG_DISABLE or %AUTONEG_ENABLE
+ * @mdio_support: Bitmask of %ETH_MDIO_SUPPORTS_* flags for the MDIO
+ * protocols supported by the interface; 0 if unknown.
+ * Read-only.
+ * @maxtxpkt: Historically used to report TX IRQ coalescing; now
+ * obsoleted by &struct ethtool_coalesce. Read-only; deprecated.
+ * @maxrxpkt: Historically used to report RX IRQ coalescing; now
+ * obsoleted by &struct ethtool_coalesce. Read-only; deprecated.
+ * @speed_hi: High bits of the speed
+ * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of
+ * %ETH_TP_MDI_*. If the status is unknown or not applicable, the
+ * value will be %ETH_TP_MDI_INVALID. Read-only.
+ * @eth_tp_mdix_ctrl: Ethernet twisted pair MDI(-X) control; one of
+ * %ETH_TP_MDI_*. If MDI(-X) control is not implemented, reads
+ * yield %ETH_TP_MDI_INVALID and writes may be ignored or rejected.
+ * When written successfully, the link should be renegotiated if
+ * necessary.
+ * @lp_advertising: Bitmask of %ADVERTISED_* flags for the link modes
+ * and other link features that the link partner advertised
+ * through autonegotiation; 0 if unknown or not applicable.
+ * Read-only.
+ *
+ * The link speed in Mbps is split between @speed and @speed_hi. Use
+ * the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
+ * access it.
+ *
+ * If autonegotiation is disabled, the speed and @duplex represent the
+ * fixed link mode and are writable if the driver supports multiple
+ * link modes. If it is enabled then they are read-only; if the link
+ * is up they represent the negotiated link mode; if the link is down,
+ * the speed is 0, %SPEED_UNKNOWN or the highest enabled speed and
+ * @duplex is %DUPLEX_UNKNOWN or the best enabled duplex mode.
+ *
+ * Some hardware interfaces may have multiple PHYs and/or physical
+ * connectors fitted or do not allow the driver to detect which are
+ * fitted. For these interfaces @port and/or @phy_address may be
+ * writable, possibly dependent on @autoneg being %AUTONEG_DISABLE.
+ * Otherwise, attempts to write different values may be ignored or
+ * rejected.
+ *
+ * Users should assume that all fields not marked read-only are
+ * writable and subject to validation by the driver. They should use
+ * %ETHTOOL_GSET to get the current values before making specific
+ * changes and then applying them with %ETHTOOL_SSET.
+ *
+ * Drivers that implement set_settings() should validate all fields
+ * other than @cmd that are not described as read-only or deprecated,
+ * and must ignore all fields described as read-only.
+ *
+ * Deprecated fields should be ignored by both users and drivers.
+ */
struct ethtool_cmd {
__u32 cmd;
- __u32 supported; /* Features this interface supports */
- __u32 advertising; /* Features this interface advertises */
- __u16 speed; /* The forced speed (lower bits) in
- * Mbps. Please use
- * ethtool_cmd_speed()/_set() to
- * access it */
- __u8 duplex; /* Duplex, half or full */
- __u8 port; /* Which connector port */
- __u8 phy_address; /* MDIO PHY address (PRTAD for clause 45).
- * May be read-only or read-write
- * depending on the driver.
- */
- __u8 transceiver; /* Which transceiver to use */
- __u8 autoneg; /* Enable or disable autonegotiation */
- __u8 mdio_support; /* MDIO protocols supported. Read-only.
- * Not set by all drivers.
- */
- __u32 maxtxpkt; /* Tx pkts before generating tx int */
- __u32 maxrxpkt; /* Rx pkts before generating rx int */
- __u16 speed_hi; /* The forced speed (upper
- * bits) in Mbps. Please use
- * ethtool_cmd_speed()/_set() to
- * access it */
- __u8 eth_tp_mdix; /* twisted pair MDI-X status */
- __u8 eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
- * link should be renegotiated if necessary
- */
- __u32 lp_advertising; /* Features the link partner advertises */
+ __u32 supported;
+ __u32 advertising;
+ __u16 speed;
+ __u8 duplex;
+ __u8 port;
+ __u8 phy_address;
+ __u8 transceiver;
+ __u8 autoneg;
+ __u8 mdio_support;
+ __u32 maxtxpkt;
+ __u32 maxrxpkt;
+ __u16 speed_hi;
+ __u8 eth_tp_mdix;
+ __u8 eth_tp_mdix_ctrl;
+ __u32 lp_advertising;
__u32 reserved[2];
};
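
As the new comment block notes, the link speed is split across @speed and @speed_hi; a small sketch of reading it through the accessor rather than the raw fields (foo_link_speed_mbps is illustrative):

/* Illustrative reader; ethtool_cmd_speed() merges speed and speed_hi. */
static __u32 foo_link_speed_mbps(const struct ethtool_cmd *ecmd)
{
	__u32 speed = ethtool_cmd_speed(ecmd);

	return speed == (__u32)SPEED_UNKNOWN ? 0 : speed;
}
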
@@ -79,37 +139,68 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
#define ETHTOOL_FWVERS_LEN 32
#define ETHTOOL_BUSINFO_LEN 32
-/* these strings are set to whatever the driver author decides... */
+
+/**
+ * struct ethtool_drvinfo - general driver and device information
+ * @cmd: Command number = %ETHTOOL_GDRVINFO
+ * @driver: Driver short name. This should normally match the name
+ * in its bus driver structure (e.g. pci_driver::name). Must
+ * not be an empty string.
+ * @version: Driver version string; may be an empty string
+ * @fw_version: Firmware version string; may be an empty string
+ * @bus_info: Device bus address. This should match the dev_name()
+ * string for the underlying bus device, if there is one. May be
+ * an empty string.
+ * @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
+ * %ETHTOOL_SPFLAGS commands; also the number of strings in the
+ * %ETH_SS_PRIV_FLAGS set
+ * @n_stats: Number of u64 statistics returned by the %ETHTOOL_GSTATS
+ * command; also the number of strings in the %ETH_SS_STATS set
+ * @testinfo_len: Number of results returned by the %ETHTOOL_TEST
+ * command; also the number of strings in the %ETH_SS_TEST set
+ * @eedump_len: Size of EEPROM accessible through the %ETHTOOL_GEEPROM
+ * and %ETHTOOL_SEEPROM commands, in bytes
+ * @regdump_len: Size of register dump returned by the %ETHTOOL_GREGS
+ * command, in bytes
+ *
+ * Users can use the %ETHTOOL_GSSET_INFO command to get the number of
+ * strings in any string set (from Linux 2.6.34).
+ *
+ * Drivers should set at most @driver, @version, @fw_version and
+ * @bus_info in their get_drvinfo() implementation. The ethtool
+ * core fills in the other fields using other driver operations.
+ */
struct ethtool_drvinfo {
__u32 cmd;
- char driver[32]; /* driver short name, "tulip", "eepro100" */
- char version[32]; /* driver version string */
- char fw_version[ETHTOOL_FWVERS_LEN]; /* firmware version string */
- char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */
- /* For PCI devices, use pci_name(pci_dev). */
+ char driver[32];
+ char version[32];
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ char bus_info[ETHTOOL_BUSINFO_LEN];
char reserved1[32];
char reserved2[12];
- /*
- * Some struct members below are filled in
- * using ops->get_sset_count(). Obtaining
- * this info from ethtool_drvinfo is now
- * deprecated; Use ETHTOOL_GSSET_INFO
- * instead.
- */
- __u32 n_priv_flags; /* number of flags valid in ETHTOOL_GPFLAGS */
- __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */
+ __u32 n_priv_flags;
+ __u32 n_stats;
__u32 testinfo_len;
- __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */
- __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */
+ __u32 eedump_len;
+ __u32 regdump_len;
};
#define SOPASS_MAX 6
-/* wake-on-lan settings */
+
+/**
+ * struct ethtool_wolinfo - Wake-On-Lan configuration
+ * @cmd: Command number = %ETHTOOL_GWOL or %ETHTOOL_SWOL
+ * @supported: Bitmask of %WAKE_* flags for supported Wake-On-Lan modes.
+ * Read-only.
+ * @wolopts: Bitmask of %WAKE_* flags for enabled Wake-On-Lan modes.
+ * @sopass: SecureOn(tm) password; meaningful only if %WAKE_MAGICSECURE
+ * is set in @wolopts.
+ */
struct ethtool_wolinfo {
__u32 cmd;
__u32 supported;
__u32 wolopts;
- __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+ __u8 sopass[SOPASS_MAX];
};
/* for passing single values */
@@ -118,20 +209,51 @@ struct ethtool_value {
__u32 data;
};
-/* for passing big chunks of data */
+/**
+ * struct ethtool_regs - hardware register dump
+ * @cmd: Command number = %ETHTOOL_GREGS
+ * @version: Dump format version. This is driver-specific and may
+ * distinguish different chips/revisions. Drivers must use new
+ * version numbers whenever the dump format changes in an
+ * incompatible way.
+ * @len: On entry, the real length of @data. On return, the number of
+ * bytes used.
+ * @data: Buffer for the register dump
+ *
+ * Users should use %ETHTOOL_GDRVINFO to find the maximum length of
+ * a register dump for the interface. They must allocate the buffer
+ * immediately following this structure.
+ */
struct ethtool_regs {
__u32 cmd;
- __u32 version; /* driver-specific, indicates different chips/revs */
- __u32 len; /* bytes */
+ __u32 version;
+ __u32 len;
__u8 data[0];
};
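
A sketch of a register dump following the rule above: ask %ETHTOOL_GDRVINFO for @regdump_len, then allocate the variable-length buffer directly after the fixed header (again using the made-up ethtool_ioctl() helper):

#include <stdlib.h>
#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

int dump_regs(const char *ifname)
{
        struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_regs *regs;
        int err;

        if (ethtool_ioctl(ifname, &info) < 0 || !info.regdump_len)
                return -1;

        /* the register data must immediately follow the fixed header */
        regs = calloc(1, sizeof(*regs) + info.regdump_len);
        if (!regs)
                return -1;
        regs->cmd = ETHTOOL_GREGS;
        regs->len = info.regdump_len;

        err = ethtool_ioctl(ifname, regs);
        /* on success, regs->version and regs->len describe the dump in regs->data */
        free(regs);
        return err;
}
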
-/* for passing EEPROM chunks */
+/**
+ * struct ethtool_eeprom - EEPROM dump
+ * @cmd: Command number = %ETHTOOL_GEEPROM, %ETHTOOL_GMODULEEEPROM or
+ * %ETHTOOL_SEEPROM
+ * @magic: A 'magic cookie' value to guard against accidental changes.
+ * The value passed in to %ETHTOOL_SEEPROM must match the value
+ * returned by %ETHTOOL_GEEPROM for the same device. This is
+ * unused when @cmd is %ETHTOOL_GMODULEEEPROM.
+ * @offset: Offset within the EEPROM to begin reading/writing, in bytes
+ * @len: On entry, number of bytes to read/write. On successful
+ * return, number of bytes actually read/written. In case of
+ * error, this may indicate at what point the error occurred.
+ * @data: Buffer to read/write from
+ *
+ * Users may use %ETHTOOL_GDRVINFO or %ETHTOOL_GMODULEINFO to find
+ * the length of an on-board or module EEPROM, respectively. They
+ * must allocate the buffer immediately following this structure.
+ */
struct ethtool_eeprom {
__u32 cmd;
__u32 magic;
- __u32 offset; /* in bytes */
- __u32 len; /* in bytes */
+ __u32 offset;
+ __u32 len;
__u8 data[0];
};
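
A sketch of reading a chunk of on-board EEPROM with %ETHTOOL_GEEPROM; the caller is assumed to keep @offset and @len within the @eedump_len reported by %ETHTOOL_GDRVINFO (hypothetical ethtool_ioctl() helper as before):

#include <stdlib.h>
#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

/* Read @len bytes of on-board EEPROM starting at @offset. */
struct ethtool_eeprom *read_eeprom(const char *ifname, __u32 offset, __u32 len)
{
        struct ethtool_eeprom *ee;

        ee = calloc(1, sizeof(*ee) + len);
        if (!ee)
                return NULL;
        ee->cmd = ETHTOOL_GEEPROM;
        ee->offset = offset;
        ee->len = len;

        if (ethtool_ioctl(ifname, ee) < 0) {
                free(ee);
                return NULL;
        }
        /* ee->magic now holds the value a later ETHTOOL_SEEPROM must echo back */
        return ee;
}
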
@@ -229,17 +351,18 @@ struct ethtool_modinfo {
* @rate_sample_interval: How often to do adaptive coalescing packet rate
* sampling, measured in seconds. Must not be zero.
*
- * Each pair of (usecs, max_frames) fields specifies this exit
- * condition for interrupt coalescing:
+ * Each pair of (usecs, max_frames) fields specifies that interrupts
+ * should be coalesced until
* (usecs > 0 && time_since_first_completion >= usecs) ||
* (max_frames > 0 && completed_frames >= max_frames)
+ *
* It is illegal to set both usecs and max_frames to zero as this
* would cause interrupts to never be generated. To disable
* coalescing, set usecs = 0 and max_frames = 1.
*
* Some implementations ignore the value of max_frames and use the
- * condition:
- * time_since_first_completion >= usecs
+ * condition time_since_first_completion >= usecs
+ *
* This is deprecated. Drivers for hardware that does not support
* counting completions should validate that max_frames == !rx_usecs.
*
@@ -279,22 +402,37 @@ struct ethtool_coalesce {
__u32 rate_sample_interval;
};
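
For example, disabling RX coalescing per the rule quoted above (usecs = 0, max_frames = 1, never both zero) looks like this, using the hypothetical ethtool_ioctl() helper:

#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

int disable_rx_coalescing(const char *ifname)
{
        struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };

        if (ethtool_ioctl(ifname, &ec) < 0)
                return -1;

        ec.cmd = ETHTOOL_SCOALESCE;
        ec.rx_coalesce_usecs = 0;
        ec.rx_max_coalesced_frames = 1;
        return ethtool_ioctl(ifname, &ec);
}
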
-/* for configuring RX/TX ring parameters */
+/**
+ * struct ethtool_ringparam - RX/TX ring parameters
+ * @cmd: Command number = %ETHTOOL_GRINGPARAM or %ETHTOOL_SRINGPARAM
+ * @rx_max_pending: Maximum supported number of pending entries per
+ * RX ring. Read-only.
+ * @rx_mini_max_pending: Maximum supported number of pending entries
+ * per RX mini ring. Read-only.
+ * @rx_jumbo_max_pending: Maximum supported number of pending entries
+ * per RX jumbo ring. Read-only.
+ * @tx_max_pending: Maximum supported number of pending entries per
+ * TX ring. Read-only.
+ * @rx_pending: Current maximum number of pending entries per RX ring
+ * @rx_mini_pending: Current maximum number of pending entries per RX
+ * mini ring
+ * @rx_jumbo_pending: Current maximum number of pending entries per RX
+ * jumbo ring
+ * @tx_pending: Current maximum supported number of pending entries
+ * per TX ring
+ *
+ * If the interface does not have separate RX mini and/or jumbo rings,
+ * @rx_mini_max_pending and/or @rx_jumbo_max_pending will be 0.
+ *
+ * There may also be driver-dependent minimum values for the number
+ * of entries per ring.
+ */
struct ethtool_ringparam {
- __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
-
- /* Read only attributes. These indicate the maximum number
- * of pending RX/TX ring entries the driver will allow the
- * user to set.
- */
+ __u32 cmd;
__u32 rx_max_pending;
__u32 rx_mini_max_pending;
__u32 rx_jumbo_max_pending;
__u32 tx_max_pending;
-
- /* Values changeable by the user. The valid values are
- * in the range 1 to the "*_max_pending" counterpart above.
- */
__u32 rx_pending;
__u32 rx_mini_pending;
__u32 rx_jumbo_pending;
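
The ring-size handshake works the same way: read the read-only *_max_pending limits with %ETHTOOL_GRINGPARAM, then write values within them via %ETHTOOL_SRINGPARAM. A sketch (hypothetical ethtool_ioctl() helper as before):

#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

/* Grow the RX ring to its advertised maximum. */
int max_out_rx_ring(const char *ifname)
{
        struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };

        if (ethtool_ioctl(ifname, &ring) < 0)
                return -1;

        ring.cmd = ETHTOOL_SRINGPARAM;
        ring.rx_pending = ring.rx_max_pending;  /* stay within the read-only limit */
        return ethtool_ioctl(ifname, &ring);
}
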
@@ -329,51 +467,96 @@ struct ethtool_channels {
__u32 combined_count;
};
-/* for configuring link flow control parameters */
+/**
+ * struct ethtool_pauseparam - Ethernet pause (flow control) parameters
+ * @cmd: Command number = %ETHTOOL_GPAUSEPARAM or %ETHTOOL_SPAUSEPARAM
+ * @autoneg: Flag to enable autonegotiation of pause frame use
+ * @rx_pause: Flag to enable reception of pause frames
+ * @tx_pause: Flag to enable transmission of pause frames
+ *
+ * Drivers should reject a non-zero setting of @autoneg when
+ * autonegotiation is disabled (or not supported) for the link.
+ *
+ * If the link is autonegotiated, drivers should use
+ * mii_advertise_flowctrl() or similar code to set the advertised
+ * pause frame capabilities based on the @rx_pause and @tx_pause flags,
+ * even if @autoneg is zero. They should also allow the advertised
+ * pause frame capabilities to be controlled directly through the
+ * advertising field of &struct ethtool_cmd.
+ *
+ * If @autoneg is non-zero, the MAC is configured to send and/or
+ * receive pause frames according to the result of autonegotiation.
+ * Otherwise, it is configured directly based on the @rx_pause and
+ * @tx_pause flags.
+ */
struct ethtool_pauseparam {
- __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
-
- /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
- * being true) the user may set 'autoneg' here non-zero to have the
- * pause parameters be auto-negotiated too. In such a case, the
- * {rx,tx}_pause values below determine what capabilities are
- * advertised.
- *
- * If 'autoneg' is zero or the link is not being auto-negotiated,
- * then {rx,tx}_pause force the driver to use/not-use pause
- * flow control.
- */
+ __u32 cmd;
__u32 autoneg;
__u32 rx_pause;
__u32 tx_pause;
};
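
A sketch of forcing symmetric flow control with pause autonegotiation off, per the semantics described above (hypothetical ethtool_ioctl() helper as before):

#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

int force_pause(const char *ifname)
{
        struct ethtool_pauseparam pp = { .cmd = ETHTOOL_GPAUSEPARAM };

        if (ethtool_ioctl(ifname, &pp) < 0)
                return -1;

        pp.cmd = ETHTOOL_SPAUSEPARAM;
        pp.autoneg = 0;
        pp.rx_pause = 1;
        pp.tx_pause = 1;
        return ethtool_ioctl(ifname, &pp);
}
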
#define ETH_GSTRING_LEN 32
+
+/**
+ * enum ethtool_stringset - string set ID
+ * @ETH_SS_TEST: Self-test result names, for use with %ETHTOOL_TEST
+ * @ETH_SS_STATS: Statistic names, for use with %ETHTOOL_GSTATS
+ * @ETH_SS_PRIV_FLAGS: Driver private flag names, for use with
+ * %ETHTOOL_GPFLAGS and %ETHTOOL_SPFLAGS
+ * @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE;
+ * now deprecated
+ * @ETH_SS_FEATURES: Device feature names
+ */
enum ethtool_stringset {
ETH_SS_TEST = 0,
ETH_SS_STATS,
ETH_SS_PRIV_FLAGS,
- ETH_SS_NTUPLE_FILTERS, /* Do not use, GRXNTUPLE is now deprecated */
+ ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
};
-/* for passing string sets for data tagging */
+/**
+ * struct ethtool_gstrings - string set for data tagging
+ * @cmd: Command number = %ETHTOOL_GSTRINGS
+ * @string_set: String set ID; one of &enum ethtool_stringset
+ * @len: On return, the number of strings in the string set
+ * @data: Buffer for strings. Each string is null-padded to a size of
+ * %ETH_GSTRING_LEN.
+ *
+ * Users must use %ETHTOOL_GSSET_INFO to find the number of strings in
+ * the string set. They must allocate a buffer of the appropriate
+ * size immediately following this structure.
+ */
struct ethtool_gstrings {
- __u32 cmd; /* ETHTOOL_GSTRINGS */
- __u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/
- __u32 len; /* number of strings in the string set */
+ __u32 cmd;
+ __u32 string_set;
+ __u32 len;
__u8 data[0];
};
+/**
+ * struct ethtool_sset_info - string set information
+ * @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @sset_mask: On entry, a bitmask of string sets to query, with bits
+ * numbered according to &enum ethtool_stringset. On return, a
+ * bitmask of those string sets queried that are supported.
+ * @data: Buffer for string set sizes. On return, this contains the
+ * size of each string set that was queried and supported, in
+ * order of ID.
+ *
+ * Example: The user passes in @sset_mask = 0x7 (sets 0, 1, 2) and on
+ * return @sset_mask == 0x6 (sets 1, 2). Then @data[0] contains the
+ * size of set 1 and @data[1] contains the size of set 2.
+ *
+ * Users must allocate a buffer of the appropriate size (4 * number of
+ * sets queried) immediately following this structure.
+ */
struct ethtool_sset_info {
- __u32 cmd; /* ETHTOOL_GSSET_INFO */
+ __u32 cmd;
__u32 reserved;
- __u64 sset_mask; /* input: each bit selects an sset to query */
- /* output: each bit a returned sset */
- __u32 data[0]; /* ETH_SS_xxx count, in order, based on bits
- in sset_mask. One bit implies one
- __u32, two bits implies two
- __u32's, etc. */
+ __u64 sset_mask;
+ __u32 data[0];
};
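
A sketch tying %ETHTOOL_GSSET_INFO and %ETHTOOL_GSTRINGS together: query the size of one string set, then fetch and print its strings (hypothetical ethtool_ioctl() helper as before; print_string_set() is an invented name):

#include <stdio.h>
#include <stdlib.h>
#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

/* Print every string in one string set, e.g. ETH_SS_STATS. */
int print_string_set(const char *ifname, enum ethtool_stringset set)
{
        struct {
                struct ethtool_sset_info hdr;
                __u32 count;                    /* room for one answer */
        } sset = { .hdr = { .cmd = ETHTOOL_GSSET_INFO, .sset_mask = 1ULL << set } };
        struct ethtool_gstrings *strings;
        __u32 i;

        if (ethtool_ioctl(ifname, &sset) < 0 ||
            !(sset.hdr.sset_mask & (1ULL << set)))
                return -1;                      /* set not supported */

        strings = calloc(1, sizeof(*strings) + sset.count * ETH_GSTRING_LEN);
        if (!strings)
                return -1;
        strings->cmd = ETHTOOL_GSTRINGS;
        strings->string_set = set;

        if (ethtool_ioctl(ifname, strings) < 0) {
                free(strings);
                return -1;
        }
        for (i = 0; i < strings->len; i++)
                printf("%.*s\n", ETH_GSTRING_LEN,
                       (const char *)strings->data + i * ETH_GSTRING_LEN);
        free(strings);
        return 0;
}
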
/**
@@ -393,24 +576,58 @@ enum ethtool_test_flags {
ETH_TEST_FL_EXTERNAL_LB_DONE = (1 << 3),
};
-/* for requesting NIC test and getting results*/
+/**
+ * struct ethtool_test - device self-test invocation
+ * @cmd: Command number = %ETHTOOL_TEST
+ * @flags: A bitmask of flags from &enum ethtool_test_flags. Some
+ * flags may be set by the user on entry; others may be set by
+ * the driver on return.
+ * @len: On return, the number of test results
+ * @data: Array of test results
+ *
+ * Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
+ * number of test results that will be returned. They must allocate a
+ * buffer of the appropriate size (8 * number of results) immediately
+ * following this structure.
+ */
struct ethtool_test {
- __u32 cmd; /* ETHTOOL_TEST */
- __u32 flags; /* ETH_TEST_FL_xxx */
+ __u32 cmd;
+ __u32 flags;
__u32 reserved;
- __u32 len; /* result length, in number of u64 elements */
+ __u32 len;
__u64 data[0];
};
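
A sketch of invoking the self-test: size the result array from @testinfo_len, run %ETHTOOL_TEST, then look at %ETH_TEST_FL_FAILED (hypothetical ethtool_ioctl() helper as before):

#include <stdlib.h>
#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

/* Run the driver self-test; returns 0 if every test passed. */
int run_selftest(const char *ifname)
{
        struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_test *test;
        int failed;

        if (ethtool_ioctl(ifname, &info) < 0)
                return -1;

        test = calloc(1, sizeof(*test) + info.testinfo_len * sizeof(__u64));
        if (!test)
                return -1;
        test->cmd = ETHTOOL_TEST;
        test->flags = 0;        /* online test; set ETH_TEST_FL_OFFLINE to go further */
        test->len = info.testinfo_len;

        if (ethtool_ioctl(ifname, test) < 0) {
                free(test);
                return -1;
        }
        failed = !!(test->flags & ETH_TEST_FL_FAILED);
        free(test);
        return failed ? -1 : 0;
}
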
-/* for dumping NIC-specific statistics */
+/**
+ * struct ethtool_stats - device-specific statistics
+ * @cmd: Command number = %ETHTOOL_GSTATS
+ * @n_stats: On return, the number of statistics
+ * @data: Array of statistics
+ *
+ * Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
+ * number of statistics that will be returned. They must allocate a
+ * buffer of the appropriate size (8 * number of statistics)
+ * immediately following this structure.
+ */
struct ethtool_stats {
- __u32 cmd; /* ETHTOOL_GSTATS */
- __u32 n_stats; /* number of u64's being returned */
+ __u32 cmd;
+ __u32 n_stats;
__u64 data[0];
};
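
A sketch of pulling the raw statistics; each of the @n_stats results lines up with the corresponding %ETH_SS_STATS string (hypothetical ethtool_ioctl() helper as before):

#include <stdlib.h>
#include <linux/ethtool.h>

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

/* Fetch the raw u64 statistics; names come from the ETH_SS_STATS string set. */
struct ethtool_stats *get_stats(const char *ifname, __u32 *count)
{
        struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_stats *stats;

        if (ethtool_ioctl(ifname, &info) < 0 || !info.n_stats)
                return NULL;

        stats = calloc(1, sizeof(*stats) + info.n_stats * sizeof(__u64));
        if (!stats)
                return NULL;
        stats->cmd = ETHTOOL_GSTATS;
        stats->n_stats = info.n_stats;

        if (ethtool_ioctl(ifname, stats) < 0) {
                free(stats);
                return NULL;
        }
        *count = stats->n_stats;
        return stats;
}
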
+/**
+ * struct ethtool_perm_addr - permanent hardware address
+ * @cmd: Command number = %ETHTOOL_GPERMADDR
+ * @size: On entry, the size of the buffer. On return, the size of the
+ * address. The command fails if the buffer is too small.
+ * @data: Buffer for the address
+ *
+ * Users must allocate the buffer immediately following this structure.
+ * A buffer size of %MAX_ADDR_LEN should be sufficient for any address
+ * type.
+ */
struct ethtool_perm_addr {
- __u32 cmd; /* ETHTOOL_GPERMADDR */
+ __u32 cmd;
__u32 size;
__u8 data[0];
};
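
A sketch using a %MAX_ADDR_LEN buffer as suggested above (hypothetical ethtool_ioctl() helper as before):

#include <stdio.h>
#include <string.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>    /* MAX_ADDR_LEN */

int ethtool_ioctl(const char *ifname, void *data);      /* helper from the drvinfo sketch */

int print_perm_addr(const char *ifname)
{
        struct {
                struct ethtool_perm_addr hdr;
                __u8 addr[MAX_ADDR_LEN];
        } req;
        __u32 i;

        memset(&req, 0, sizeof(req));
        req.hdr.cmd = ETHTOOL_GPERMADDR;
        req.hdr.size = MAX_ADDR_LEN;

        if (ethtool_ioctl(ifname, &req) < 0)
                return -1;
        for (i = 0; i < req.hdr.size; i++)
                printf("%02x%c", req.addr[i], i + 1 == req.hdr.size ? '\n' : ':');
        return 0;
}
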
@@ -593,7 +810,7 @@ struct ethtool_rx_flow_spec {
* %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
* location, and may remove a rule at a later location (lower
* priority) that matches exactly the same set of flows. The special
- * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * values are %RX_CLS_LOC_ANY, selecting any location;
* %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
* priority); and %RX_CLS_LOC_LAST, selecting the last suitable
* location (minimum priority). Additional special values may be
@@ -704,9 +921,6 @@ struct ethtool_flash {
* for %ETHTOOL_GET_DUMP_FLAG command
* @data: data collected for get dump data operation
*/
-
-#define ETH_FW_DUMP_DISABLE 0
-
struct ethtool_dump {
__u32 cmd;
__u32 version;
@@ -715,6 +929,8 @@ struct ethtool_dump {
__u8 data[0];
};
+#define ETH_FW_DUMP_DISABLE 0
+
/* for returning and changing feature sets */
/**
@@ -734,8 +950,9 @@ struct ethtool_get_features_block {
/**
* struct ethtool_gfeatures - command to get state of device's features
* @cmd: command number = %ETHTOOL_GFEATURES
- * @size: in: number of elements in the features[] array;
- * out: number of elements in features[] needed to hold all features
+ * @size: On entry, the number of elements in the features[] array;
+ * on return, the number of elements in features[] needed to hold
+ * all features
* @features: state of features
*/
struct ethtool_gfeatures {
@@ -905,7 +1122,6 @@ enum ethtool_sfeatures_retval_bits {
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
-/* Indicates what features are supported by the interface. */
#define SUPPORTED_10baseT_Half (1 << 0)
#define SUPPORTED_10baseT_Full (1 << 1)
#define SUPPORTED_100baseT_Half (1 << 2)
@@ -934,7 +1150,6 @@ enum ethtool_sfeatures_retval_bits {
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#define SUPPORTED_40000baseLR4_Full (1 << 26)
-/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1)
#define ADVERTISED_100baseT_Half (1 << 2)
@@ -999,9 +1214,7 @@ enum ethtool_sfeatures_retval_bits {
#define XCVR_DUMMY2 0x03
#define XCVR_DUMMY3 0x04
-/* Enable or disable autonegotiation. If this is set to enable,
- * the forced link modes above are completely ignored.
- */
+/* Enable or disable autonegotiation. */
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h
index 7fabba5059cf..feb0b4c0814c 100644
--- a/include/uapi/linux/mic_ioctl.h
+++ b/include/uapi/linux/mic_ioctl.h
@@ -39,7 +39,7 @@ struct mic_copy_desc {
#else
struct iovec *iov;
#endif
- int iovcnt;
+ __u32 iovcnt;
__u8 vr_idx;
__u8 update_used;
__u32 out_len;
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 91054fd660e0..a12e6cae5132 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -418,8 +418,18 @@
* %NL80211_ATTR_SSID attribute, and can optionally specify the association
* IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
* %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
- * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
- * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
+ * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
+ * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, %NL80211_ATTR_MAC_HINT, and
+ * %NL80211_ATTR_WIPHY_FREQ_HINT.
+ * If included, %NL80211_ATTR_MAC and %NL80211_ATTR_WIPHY_FREQ are
+ * restrictions on BSS selection, i.e., they effectively prevent roaming
+ * within the ESS. %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT
+ * can be included to provide a recommendation of the initial BSS while
+ * allowing the driver to roam to other BSSes within the ESS and also to
+ * ignore this recommendation if the indicated BSS is not ideal. Only one
+ * set of BSSID/frequency parameters is used (i.e., either the enforcing
+ * %NL80211_ATTR_MAC/%NL80211_ATTR_WIPHY_FREQ pair or the less strict
+ * %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
* Background scan period can optionally be
* specified in %NL80211_ATTR_BG_SCAN_PERIOD,
* if not specified default background scan configuration
@@ -1555,6 +1565,16 @@ enum nl80211_commands {
* data is in the format defined for the payload of the QoS Map Set element
* in IEEE Std 802.11-2012, 8.4.2.97.
*
+ * @NL80211_ATTR_MAC_HINT: MAC address recommendation as initial BSS
+ * @NL80211_ATTR_WIPHY_FREQ_HINT: frequency of the recommended initial BSS
+ *
+ * @NL80211_ATTR_MAX_AP_ASSOC_STA: Device attribute that indicates how many
+ * associated stations are supported in AP mode (including P2P GO); u32.
+ * Since drivers may not have a fixed limit on the maximum number (e.g.,
+ * other concurrent operations may affect this), drivers are allowed to
+ * advertise values that cannot always be met. In such cases, an attempt
+ * to add a new station entry with @NL80211_CMD_NEW_STATION may fail.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
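
A hedged userspace sketch of how a supplicant might pass the new hint attributes with %NL80211_CMD_CONNECT over libnl-3; socket setup, callbacks and error handling are elided, and connect_with_hints() is an invented name, not part of this patch:

#include <string.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

int connect_with_hints(struct nl_sock *sk, int nl80211_family, int ifindex,
                       const char *ssid, const unsigned char bssid_hint[6],
                       unsigned int freq_hint_mhz)
{
        struct nl_msg *msg = nlmsg_alloc();
        int err;

        if (!msg)
                return -1;
        genlmsg_put(msg, 0, 0, nl80211_family, 0, 0, NL80211_CMD_CONNECT, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);

        /* hints only: the driver may pick or roam to another BSS in the ESS */
        nla_put(msg, NL80211_ATTR_MAC_HINT, 6, bssid_hint);
        nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ_HINT, freq_hint_mhz);

        err = nl_send_auto(sk, msg);
        nlmsg_free(msg);
        return err < 0 ? -1 : 0;
}
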
@@ -1883,6 +1903,11 @@ enum nl80211_attrs {
NL80211_ATTR_QOS_MAP,
+ NL80211_ATTR_MAC_HINT,
+ NL80211_ATTR_WIPHY_FREQ_HINT,
+
+ NL80211_ATTR_MAX_AP_ASSOC_STA,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2412,7 +2437,10 @@ enum nl80211_reg_type {
* in KHz. This is not a center frequency but an actual regulatory
* band edge.
* @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
- * frequency range, in KHz.
+ * frequency range, in KHz. If not present or 0, the maximum available
+ * bandwidth should be calculated based on contiguous rules, and wider
+ * channels will be allowed to cross multiple contiguous/overlapping
+ * frequency ranges.
* @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
* for a given frequency range. The value is in mBi (100 * dBi).
* If you don't have one then don't send this.
@@ -2442,9 +2470,15 @@ enum nl80211_reg_rule_attr {
* enum nl80211_sched_scan_match_attr - scheduled scan match attributes
* @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
- * only report BSS with matching SSID.
+ * only report BSS with matching SSID.
* @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
- * BSS in scan results. Filtering is turned off if not specified.
+ * BSS in scan results. Filtering is turned off if not specified. Note that
+ * if this attribute is in a match set of its own, then it is treated as
+ * the default value for all matchsets with an SSID, rather than being a
+ * matchset of its own without an RSSI filter. This is due to problems with
+ * how this API was implemented in the past. Also, due to the same problem,
+ * the only way to create a matchset with only an RSSI filter (with this
+ * attribute) is if there's only a single matchset with the RSSI attribute.
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3131,6 +3165,7 @@ enum nl80211_key_attributes {
* in an array of MCS numbers.
* @NL80211_TXRATE_VHT: VHT rates allowed for TX rate selection,
* see &struct nl80211_txrate_vht
+ * @NL80211_TXRATE_GI: configure GI, see &enum nl80211_txrate_gi
* @__NL80211_TXRATE_AFTER_LAST: internal
* @NL80211_TXRATE_MAX: highest TX rate attribute
*/
@@ -3139,6 +3174,7 @@ enum nl80211_tx_rate_attributes {
NL80211_TXRATE_LEGACY,
NL80211_TXRATE_HT,
NL80211_TXRATE_VHT,
+ NL80211_TXRATE_GI,
/* keep last */
__NL80211_TXRATE_AFTER_LAST,
@@ -3156,6 +3192,12 @@ struct nl80211_txrate_vht {
__u16 mcs[NL80211_VHT_NSS_MAX];
};
+enum nl80211_txrate_gi {
+ NL80211_TXRATE_DEFAULT_GI,
+ NL80211_TXRATE_FORCE_SGI,
+ NL80211_TXRATE_FORCE_LGI,
+};
+
/**
* enum nl80211_band - Frequency band
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 377f1e59411d..3b9718328d8b 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -186,6 +186,9 @@ struct tcp_info {
__u32 tcpi_rcv_space;
__u32 tcpi_total_retrans;
+
+ __u64 tcpi_pacing_rate;
+ __u64 tcpi_max_pacing_rate;
};
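
Userspace can read the new fields through the ordinary TCP_INFO getsockopt. A minimal sketch, assuming an already-connected TCP socket; print_pacing_rate() is an invented name:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/* Print the kernel-reported pacing rates for a connected TCP socket. */
int print_pacing_rate(int sock_fd)
{
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        memset(&ti, 0, sizeof(ti));
        if (getsockopt(sock_fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
                return -1;

        /* older kernels return a shorter struct without the new fields */
        if (len < sizeof(ti))
                return 0;
        printf("pacing %llu B/s (cap %llu B/s)\n",
               (unsigned long long)ti.tcpi_pacing_rate,
               (unsigned long long)ti.tcpi_max_pacing_rate);
        return 0;
}
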
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild
index 61257cb14653..5c459628e8c7 100644
--- a/include/uapi/xen/Kbuild
+++ b/include/uapi/xen/Kbuild
@@ -1,3 +1,5 @@
# UAPI Header export list
header-y += evtchn.h
+header-y += gntalloc.h
+header-y += gntdev.h
header-y += privcmd.h
diff --git a/include/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 76bd58065f4f..76bd58065f4f 100644
--- a/include/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
diff --git a/include/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 5304bd3c84c5..5304bd3c84c5 100644
--- a/include/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac59c36..32ec05a6572f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
* it's less than the number provided by the backend. The indirect_grefs field
* in blkif_request_indirect should be filled by the frontend with the
* grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
*
* If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
* create the "feature-max-indirect-segments" node!
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
-struct blkif_request_segment_aligned {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
#endif
uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_request_discard {
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
deleted file mode 100644
index ac45e0712afa..000000000000
--- a/include/xen/interface/xencomm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright (C) IBM Corp. 2006
- */
-
-#ifndef _XEN_XENCOMM_H_
-#define _XEN_XENCOMM_H_
-
-/* A xencomm descriptor is a scatter/gather list containing physical
- * addresses corresponding to a virtually contiguous memory area. The
- * hypervisor translates these physical addresses to machine addresses to copy
- * to and from the virtually contiguous area.
- */
-
-#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-#define XENCOMM_INVALID (~0UL)
-
-struct xencomm_desc {
- uint32_t magic;
- uint32_t nr_addrs; /* the number of entries in address[] */
- uint64_t address[0];
-};
-
-#endif /* _XEN_XENCOMM_H_ */
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
deleted file mode 100644
index e43b039be112..000000000000
--- a/include/xen/xencomm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- * Jerone Young <jyoung5@us.ibm.com>
- */
-
-#ifndef _LINUX_XENCOMM_H_
-#define _LINUX_XENCOMM_H_
-
-#include <xen/interface/xencomm.h>
-
-#define XENCOMM_MINI_ADDRS 3
-struct xencomm_mini {
- struct xencomm_desc _desc;
- uint64_t address[XENCOMM_MINI_ADDRS];
-};
-
-/* To avoid additionnal virt to phys conversion, an opaque structure is
- presented. */
-struct xencomm_handle;
-
-extern void xencomm_free(struct xencomm_handle *desc);
-extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
-extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
- unsigned long bytes, struct xencomm_mini *xc_area);
-
-#if 0
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- struct xencomm_mini xc_desc ## _base[(n)] \
- __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
- struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
-#else
-/*
- * gcc bug workaround:
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
- * gcc doesn't handle properly stack variable with
- * __attribute__((__align__(sizeof(struct xencomm_mini))))
- */
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- unsigned char xc_desc ## _base[((n) + 1 ) * \
- sizeof(struct xencomm_mini)]; \
- struct xencomm_mini *xc_desc = (struct xencomm_mini *) \
- ((unsigned long)xc_desc ## _base + \
- (sizeof(struct xencomm_mini) - \
- ((unsigned long)xc_desc ## _base) % \
- sizeof(struct xencomm_mini)));
-#endif
-#define xencomm_map_no_alloc(ptr, bytes) \
- ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \
- __xencomm_map_no_alloc(ptr, bytes, xc_desc); })
-
-/* provided by architecture code: */
-extern unsigned long xencomm_vtop(unsigned long vaddr);
-
-static inline void *xencomm_pa(void *ptr)
-{
- return (void *)xencomm_vtop((unsigned long)ptr);
-}
-
-#define xen_guest_handle(hnd) ((hnd).p)
-
-#endif /* _LINUX_XENCOMM_H_ */
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index bd8e788d71e0..1ef0606797c9 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq,
EXPORT_SYMBOL(devm_request_threaded_irq);
/**
+ * devm_request_any_context_irq - allocate an interrupt line for a managed device
+ * @dev: device to request interrupt for
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as
+ * request_any_context_irq(). IRQs requested with this function will be
+ * automatically freed on driver detach.
+ *
+ * If an IRQ allocated with this function needs to be freed
+ * separately, devm_free_irq() must be used.
+ */
+int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ struct irq_devres *dr;
+ int rc;
+
+ dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
+ GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
+ if (rc < 0) {
+ devres_free(dr);
+ return rc;
+ }
+
+ dr->irq = irq;
+ dr->dev_id = dev_id;
+ devres_add(dev, dr);
+
+ return rc;
+}
+EXPORT_SYMBOL(devm_request_any_context_irq);
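
A sketch of how a driver's probe path might use the new helper; the foo_* names and the platform-device framing are placeholders, not an existing driver:

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        /* may run as a hard IRQ or in a thread, depending on the parent irqchip */
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq, ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_any_context_irq(&pdev->dev, irq, foo_irq, 0,
                                           dev_name(&pdev->dev), pdev);
        if (ret < 0)
                return ret;

        /* no matching free_irq() needed: devres releases it on detach */
        return 0;
}
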
+
+/**
* devm_free_irq - free an interrupt
* @dev: device to free interrupt for
* @irq: Interrupt line to free
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cfd..8ab8e9390297 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
{
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
+EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b1d255f04135..4dae9cbe9259 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
next_seq = log_next_seq;
len = 0;
- prev = 0;
while (len >= 0 && seq < next_seq) {
struct printk_log *msg = log_from_idx(idx);
int textlen;
@@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
next_idx = idx;
l = 0;
- prev = 0;
while (seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7a925ba456fb..a6a5bf53e86d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -51,7 +51,13 @@
* HZ shrinks, so values greater than 8 overflow 32bits when
* HZ=100.
*/
+#if HZ < 34
+#define JIFFIES_SHIFT 6
+#elif HZ < 67
+#define JIFFIES_SHIFT 7
+#else
#define JIFFIES_SHIFT 8
+#endif
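
As a rough illustration of the constraint behind this ladder (the jiffies clocksource multiplier is roughly (NSEC_PER_SEC / HZ) << JIFFIES_SHIFT and has to fit in 32 bits), here is a small userspace check; the sample HZ values and the simplified NSEC_PER_JIFFY rounding are assumptions for illustration, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

static unsigned int jiffies_shift(unsigned int hz)
{
        /* mirrors the #if ladder above */
        return hz < 34 ? 6 : hz < 67 ? 7 : 8;
}

int main(void)
{
        unsigned int hz_values[] = { 24, 32, 48, 64, 100, 250, 300, 1000 };
        unsigned int i;

        for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                unsigned int hz = hz_values[i];
                uint64_t mult = (uint64_t)(1000000000u / hz) << jiffies_shift(hz);

                printf("HZ=%-4u shift=%u mult=%llu %s\n", hz, jiffies_shift(hz),
                       (unsigned long long)mult,
                       mult <= UINT32_MAX ? "fits in u32" : "OVERFLOWS");
        }
        return 0;
}
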
static cycle_t jiffies_read(struct clocksource *cs)
{
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 43780ab5e279..98977a57ac72 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -756,6 +756,7 @@ out:
static void tick_broadcast_clear_oneshot(int cpu)
{
cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+ cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}
static void tick_broadcast_init_next_event(struct cpumask *mask,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 294b8a271a04..fc4da2d97f9b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
write &= RB_WRITE_MASK;
tail = write - length;
+ /*
+ * If this is the first commit on the page, then it has the same
+ * timestamp as the page itself.
+ */
+ if (!tail)
+ delta = 0;
+
/* See if we shot pass the end of this buffer page */
if (unlikely(write > BUF_PAGE_SIZE))
return rb_move_tail(cpu_buffer, length, tail,
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 7be235f1a70b..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
/*
* Try to steal tags from a remote cpu's percpu freelist.
*
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags.
*
* Then we iterate through the cpus until we find some tags - we don't attempt
* to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
struct percpu_ida_cpu *remote;
for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
- cpus_have_tags--) {
+ cpus_have_tags; cpus_have_tags--) {
cpu = cpumask_next(cpu, &pool->cpus_have_tags);
if (cpu >= nr_cpu_ids) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf974e1..da23eb96779f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
ret = HPAGE_PMD_NR;
+ set_pmd_at(mm, addr, pmd, entry);
BUG_ON(pmd_write(entry));
} else {
struct page *page = pmd_page(*pmd);
@@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (!is_huge_zero_page(page) &&
!pmd_numa(*pmd)) {
- entry = *pmd;
- entry = pmd_mknuma(entry);
+ pmdp_set_numa(mm, addr, pmd);
ret = HPAGE_PMD_NR;
}
}
-
- /* Set PMD if cleared earlier */
- if (ret == HPAGE_PMD_NR)
- set_pmd_at(mm, addr, pmd, entry);
-
spin_unlock(ptl);
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c1785744..769a67a15803 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (pte_numa(ptent))
ptent = pte_mknonnuma(ptent);
ptent = pte_modify(ptent, newprot);
+ /*
+ * Avoid taking write faults for pages we
+ * know to be dirty.
+ */
+ if (dirty_accountable && pte_dirty(ptent))
+ ptent = pte_mkwrite(ptent);
+ ptep_modify_prot_commit(mm, addr, pte, ptent);
updated = true;
} else {
struct page *page;
- ptent = *pte;
page = vm_normal_page(vma, addr, oldpte);
if (page && !PageKsm(page)) {
if (!pte_numa(oldpte)) {
- ptent = pte_mknuma(ptent);
- set_pte_at(mm, addr, pte, ptent);
+ ptep_set_numa(mm, addr, pte);
updated = true;
}
}
}
-
- /*
- * Avoid taking write faults for pages we know to be
- * dirty.
- */
- if (dirty_accountable && pte_dirty(ptent)) {
- ptent = pte_mkwrite(ptent);
- updated = true;
- }
-
if (updated)
pages++;
-
- /* Only !prot_numa always clears the pte */
- if (!prot_numa)
- ptep_modify_prot_commit(mm, addr, pte, ptent);
} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index de51c48c4393..566adbf5c506 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -556,7 +556,7 @@ static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- int subclass = 0, i;
+ int subclass = 0;
netif_carrier_off(dev);
@@ -606,17 +606,10 @@ static int vlan_dev_init(struct net_device *dev)
vlan_dev_set_lockdep_class(dev, subclass);
- vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct vlan_pcpu_stats *vlan_stat;
- vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
- u64_stats_init(&vlan_stat->syncp);
- }
-
-
return 0;
}
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index d27b86dfb0e9..d1c55d8dd0a2 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -926,7 +926,7 @@ static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos)
struct aarp_entry *entry;
rescan:
- while(ct < AARP_HASH_SIZE) {
+ while (ct < AARP_HASH_SIZE) {
for (entry = table[ct]; entry; entry = entry->next) {
if (!pos || ++off == *pos) {
iter->table = table;
@@ -995,7 +995,7 @@ static const char *dt2str(unsigned long ticks)
{
static char buf[32];
- sprintf(buf, "%ld.%02ld", ticks / HZ, ((ticks % HZ) * 100 ) / HZ);
+ sprintf(buf, "%ld.%02ld", ticks / HZ, ((ticks % HZ) * 100) / HZ);
return buf;
}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 02806c6b2ff3..786ee2f83d5f 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -293,7 +293,7 @@ static int atif_probe_device(struct atalk_iface *atif)
/* Perform AARP probing for a proxy address */
static int atif_proxy_probe_device(struct atalk_iface *atif,
- struct atalk_addr* proxy_addr)
+ struct atalk_addr *proxy_addr)
{
int netrange = ntohs(atif->nets.nr_lastnet) -
ntohs(atif->nets.nr_firstnet) + 1;
@@ -581,7 +581,7 @@ out:
}
/* Delete a route. Find it and discard it */
-static int atrtr_delete(struct atalk_addr * addr)
+static int atrtr_delete(struct atalk_addr *addr)
{
struct atalk_route **r = &atalk_routes;
int retval = 0;
@@ -936,11 +936,11 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
int i, copy;
/* checksum stuff in header space */
- if ( (copy = start - offset) > 0) {
+ if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
sum = atalk_sum_partial(skb->data + offset, copy, sum);
- if ( (len -= copy) == 0)
+ if ((len -= copy) == 0)
return sum;
offset += copy;
@@ -1151,7 +1151,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
at->src_net = addr->sat_addr.s_net = ap->s_net;
- at->src_node = addr->sat_addr.s_node= ap->s_node;
+ at->src_node = addr->sat_addr.s_node = ap->s_node;
} else {
err = -EADDRNOTAVAIL;
if (!atalk_find_interface(addr->sat_addr.s_net,
@@ -1790,53 +1790,53 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
switch (cmd) {
- /* Protocol layer */
- case TIOCOUTQ: {
- long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+ /* Protocol layer */
+ case TIOCOUTQ: {
+ long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
- if (amount < 0)
- amount = 0;
- rc = put_user(amount, (int __user *)argp);
- break;
- }
- case TIOCINQ: {
- /*
- * These two are safe on a single CPU system as only
- * user tasks fiddle here
- */
- struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
- long amount = 0;
+ if (amount < 0)
+ amount = 0;
+ rc = put_user(amount, (int __user *)argp);
+ break;
+ }
+ case TIOCINQ: {
+ /*
+ * These two are safe on a single CPU system as only
+ * user tasks fiddle here
+ */
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+ long amount = 0;
- if (skb)
- amount = skb->len - sizeof(struct ddpehdr);
- rc = put_user(amount, (int __user *)argp);
- break;
- }
- case SIOCGSTAMP:
- rc = sock_get_timestamp(sk, argp);
- break;
- case SIOCGSTAMPNS:
- rc = sock_get_timestampns(sk, argp);
- break;
- /* Routing */
- case SIOCADDRT:
- case SIOCDELRT:
- rc = -EPERM;
- if (capable(CAP_NET_ADMIN))
- rc = atrtr_ioctl(cmd, argp);
- break;
- /* Interface */
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- case SIOCGIFBRDADDR:
- case SIOCATALKDIFADDR:
- case SIOCDIFADDR:
- case SIOCSARP: /* proxy AARP */
- case SIOCDARP: /* proxy AARP */
- rtnl_lock();
- rc = atif_ioctl(cmd, argp);
- rtnl_unlock();
- break;
+ if (skb)
+ amount = skb->len - sizeof(struct ddpehdr);
+ rc = put_user(amount, (int __user *)argp);
+ break;
+ }
+ case SIOCGSTAMP:
+ rc = sock_get_timestamp(sk, argp);
+ break;
+ case SIOCGSTAMPNS:
+ rc = sock_get_timestampns(sk, argp);
+ break;
+ /* Routing */
+ case SIOCADDRT:
+ case SIOCDELRT:
+ rc = -EPERM;
+ if (capable(CAP_NET_ADMIN))
+ rc = atrtr_ioctl(cmd, argp);
+ break;
+ /* Interface */
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCATALKDIFADDR:
+ case SIOCDIFADDR:
+ case SIOCSARP: /* proxy AARP */
+ case SIOCDARP: /* proxy AARP */
+ rtnl_lock();
+ rc = atif_ioctl(cmd, argp);
+ rtnl_unlock();
+ break;
}
return rc;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 512159bf607f..8323bced8e5b 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
size = bat_priv->num_ifaces * sizeof(uint8_t);
orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
if (!orig_node->bat_iv.bcast_own_sum)
- goto free_bcast_own;
+ goto free_orig_node;
hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
batadv_choose_orig, orig_node,
&orig_node->hash_entry);
if (hash_added != 0)
- goto free_bcast_own;
+ goto free_orig_node;
return orig_node;
-free_bcast_own:
- kfree(orig_node->bat_iv.bcast_own);
free_orig_node:
+ /* free twice, as batadv_orig_node_new sets refcount to 2 */
+ batadv_orig_node_free_ref(orig_node);
batadv_orig_node_free_ref(orig_node);
return NULL;
@@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
struct batadv_orig_node *orig_neigh)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct batadv_neigh_node *neigh_node;
+ struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
if (!neigh_node)
@@ -281,14 +281,24 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
neigh_node->orig_node = orig_neigh;
neigh_node->if_incoming = hard_iface;
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Creating new neighbor %pM for orig_node %pM on interface %s\n",
- neigh_addr, orig_node->orig, hard_iface->net_dev->name);
-
spin_lock_bh(&orig_node->neigh_list_lock);
- hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+ tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
+ neigh_addr);
+ if (!tmp_neigh_node) {
+ hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+ } else {
+ kfree(neigh_node);
+ batadv_hardif_free_ref(hard_iface);
+ neigh_node = tmp_neigh_node;
+ }
spin_unlock_bh(&orig_node->neigh_list_lock);
+ if (!tmp_neigh_node)
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+ neigh_addr, orig_node->orig,
+ hard_iface->net_dev->name);
+
out:
return neigh_node;
}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 55cf2260d295..d7fafc1009a0 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -389,8 +389,6 @@ out:
batadv_neigh_ifinfo_free_ref(router_gw_tq);
if (router_orig_tq)
batadv_neigh_ifinfo_free_ref(router_orig_tq);
-
- return;
}
/**
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3d417d3641c6..b851cc580853 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
const struct batadv_hard_iface *hard_iface;
- int min_mtu = ETH_DATA_LEN;
+ int min_mtu = INT_MAX;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
}
rcu_read_unlock();
- atomic_set(&bat_priv->packet_size_max, min_mtu);
-
if (atomic_read(&bat_priv->fragmentation) == 0)
goto out;
@@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
min_mtu -= sizeof(struct batadv_frag_packet);
min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
- atomic_set(&bat_priv->packet_size_max, min_mtu);
-
- /* with fragmentation enabled we can fragment external packets easily */
- min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
out:
- return min_mtu - batadv_max_header_len();
+ /* report to the other components the maximum number of bytes that
+ * batman-adv can send over the wire (without considering the payload
+ * overhead). For example, this value is used by TT to compute the
+ * maximum local table size.
+ */
+ atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+ /* the real soft-interface MTU is computed by removing the payload
+ * overhead from the maximum number of bytes just computed.
+ *
+ * However, batman-adv does not support MTUs bigger than ETH_DATA_LEN
+ */
+ return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
}
/* adjusts the MTU if a new interface with a smaller MTU appeared. */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6df12a2e3605..853941629dc1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -458,6 +458,42 @@ out:
}
/**
+ * batadv_neigh_node_get - retrieve a neighbour from the list
+ * @orig_node: originator which the neighbour belongs to
+ * @hard_iface: the interface where this neighbour is connected to
+ * @addr: the address of the neighbour
+ *
+ * Looks for and possibly returns a neighbour belonging to this originator list
+ * which is connected through the provided hard interface.
+ * Returns NULL if the neighbour is not found.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *hard_iface,
+ const uint8_t *addr)
+{
+ struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
+ if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
+ continue;
+
+ if (tmp_neigh_node->if_incoming != hard_iface)
+ continue;
+
+ if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+ continue;
+
+ res = tmp_neigh_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ return res;
+}
+
+/**
* batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
* @rcu: rcu pointer of the orig_ifinfo object
*/
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 37be290f63f6..db3a9ed734cb 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
const uint8_t *addr);
struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *hard_iface,
+ const uint8_t *addr);
+struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
const uint8_t *neigh_addr,
struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1ed9f7c9ecea..a953d5b196a3 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
int is_old_ttvn;
/* check if there is enough data before accessing it */
- if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
+ if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
return 0;
/* create a copy of the skb (in case of re-routing) to modify it. */
@@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
if (ret != NET_RX_SUCCESS)
ret = batadv_route_unicast_packet(skb, recv_if);
+ else
+ consume_skb(skb);
return ret;
}
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 579f5f00a385..843febd1e519 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
unsigned short vid)
{
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ struct ethhdr *ethhdr;
struct batadv_unicast_packet *unicast_packet;
- int ret = NET_XMIT_DROP;
+ int ret = NET_XMIT_DROP, hdr_size;
if (!orig_node)
goto out;
@@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
case BATADV_UNICAST:
if (!batadv_send_skb_prepare_unicast(skb, orig_node))
goto out;
+
+ hdr_size = sizeof(*unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
orig_node,
packet_subtype))
goto out;
+
+ hdr_size = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
goto out;
}
+ ethhdr = (struct ethhdr *)(skb->data + hdr_size);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b6071f675a3e..959dde721c46 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
struct hlist_head *head;
uint32_t i, crc_tmp, crc = 0;
uint8_t flags;
+ __be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
orig_node))
continue;
- crc_tmp = crc32c(0, &tt_common->vid,
- sizeof(tt_common->vid));
+ /* use network order to read the VID: this ensures that
+ * every node reads the bytes in the same order.
+ */
+ tmp_vid = htons(tt_common->vid);
+ crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
/* compute the CRC on flags that have to be kept in sync
* among nodes
@@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
struct hlist_head *head;
uint32_t i, crc_tmp, crc = 0;
uint8_t flags;
+ __be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
if (tt_common->flags & BATADV_TT_CLIENT_NEW)
continue;
- crc_tmp = crc32c(0, &tt_common->vid,
- sizeof(tt_common->vid));
+ /* use network order to read the VID: this ensures that
+ * every node reads the bytes in the same order.
+ */
+ tmp_vid = htons(tt_common->vid);
+ crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
/* compute the CRC on flags that have to be kept in sync
* among nodes
@@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
{
struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
struct batadv_orig_node_vlan *vlan;
+ uint32_t crc;
int i;
/* check if each received CRC matches the locally stored one */
@@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
if (!vlan)
return false;
- if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+ crc = vlan->tt.crc;
+ batadv_orig_node_vlan_free_ref(vlan);
+
+ if (crc != ntohl(tt_vlan_tmp->crc))
return false;
}
@@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
spin_lock_bh(&orig_node->tt_lock);
- tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
ttvn, tt_change);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 292e619db896..d9fb93451442 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -430,6 +430,16 @@ static void hidp_del_timer(struct hidp_session *session)
del_timer(&session->timer);
}
+static void hidp_process_report(struct hidp_session *session,
+ int type, const u8 *data, int len, int intr)
+{
+ if (len > HID_MAX_BUFFER_SIZE)
+ len = HID_MAX_BUFFER_SIZE;
+
+ memcpy(session->input_buf, data, len);
+ hid_input_report(session->hid, type, session->input_buf, len, intr);
+}
+
static void hidp_process_handshake(struct hidp_session *session,
unsigned char param)
{
@@ -502,7 +512,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
hidp_input_report(session, skb);
if (session->hid)
- hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
+ hidp_process_report(session, HID_INPUT_REPORT,
+ skb->data, skb->len, 0);
break;
case HIDP_DATA_RTYPE_OTHER:
@@ -584,7 +595,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
hidp_input_report(session, skb);
if (session->hid) {
- hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1);
+ hidp_process_report(session, HID_INPUT_REPORT,
+ skb->data, skb->len, 1);
BT_DBG("report len %d", skb->len);
}
} else {
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index ab5241400cf7..8798492a6e99 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,6 +24,7 @@
#define __HIDP_H
#include <linux/types.h>
+#include <linux/hid.h>
#include <linux/kref.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>
@@ -179,6 +180,9 @@ struct hidp_session {
/* Used in hidp_output_raw_report() */
int output_report_success; /* boolean */
+
+ /* temporary input buffer */
+ u8 input_buf[HID_MAX_BUFFER_SIZE];
};
/* HIDP init defines */
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 63f0455c0bc3..bf34451743a1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -88,18 +88,11 @@ out:
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- int i;
- br->stats = alloc_percpu(struct pcpu_sw_netstats);
+ br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!br->stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *br_dev_stats;
- br_dev_stats = per_cpu_ptr(br->stats, i);
- u64_stats_init(&br_dev_stats->syncp);
- }
-
return 0;
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0676f2b199d6..82750f915865 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2082,7 +2082,6 @@ bad:
pr_err("osdc handle_map corrupt msg\n");
ceph_msg_dump(msg);
up_write(&osdc->map_sem);
- return;
}
/*
@@ -2281,7 +2280,6 @@ done_err:
bad:
pr_err("osdc handle_watch_notify corrupt msg\n");
- return;
}
/*
diff --git a/net/core/dev.c b/net/core/dev.c
index 4ad1b78c9c77..b1b0c8d4d7df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
* 2. No high memory really exists on this machine.
*/
-static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
}
static netdev_features_t harmonize_features(struct sk_buff *skb,
- netdev_features_t features)
+ const struct net_device *dev,
+ netdev_features_t features)
{
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, skb_network_protocol(skb))) {
features &= ~NETIF_F_ALL_CSUM;
- } else if (illegal_highdma(skb->dev, skb)) {
+ } else if (illegal_highdma(dev, skb)) {
features &= ~NETIF_F_SG;
}
return features;
}
-netdev_features_t netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+ const struct net_device *dev)
{
__be16 protocol = skb->protocol;
- netdev_features_t features = skb->dev->features;
+ netdev_features_t features = dev->features;
- if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+ if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
features &= ~NETIF_F_GSO_MASK;
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
} else if (!vlan_tx_tag_present(skb)) {
- return harmonize_features(skb, features);
+ return harmonize_features(skb, dev, features);
}
- features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+ features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- return harmonize_features(skb, features);
+ return harmonize_features(skb, dev, features);
}
-EXPORT_SYMBOL(netif_skb_features);
+EXPORT_SYMBOL(netif_skb_dev_features);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87577d447554..e29e810663d7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb)
return poff;
}
-static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
-{
- if (unlikely(queue_index >= dev->real_num_tx_queues)) {
- net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
- dev->name, queue_index,
- dev->real_num_tx_queues);
- return 0;
- }
- return queue_index;
-}
-
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
#endif
}
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
return queue_index;
}
-EXPORT_SYMBOL(__netdev_pick_tx);
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
if (dev->real_num_tx_queues != 1) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb,
- accel_priv);
+ queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+ __netdev_pick_tx);
else
queue_index = __netdev_pick_tx(dev, skb);
if (!accel_priv)
- queue_index = dev_cap_txqueue(dev, queue_index);
+ queue_index = netdev_cap_txqueue(dev, queue_index);
}
skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 93886246a0b4..73aa594674ef 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -996,15 +996,12 @@ static struct attribute_group dql_group = {
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
-static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
struct net_device *dev = queue->dev;
- int i;
-
- for (i = 0; i < dev->num_tx_queues; i++)
- if (queue == &dev->_tx[i])
- break;
+ unsigned int i;
+ i = queue - dev->_tx;
BUG_ON(i >= dev->num_tx_queues);
return i;
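For reference, the rewritten get_netdev_queue_index() relies on tx queues living in the contiguous dev->_tx array, so the index is simply the pointer difference rather than a linear scan. A minimal userspace sketch of the same idiom follows; "struct txq" and the local array are illustrative stand-ins, not kernel types.

/* Deriving an element's index from its address, as queue - dev->_tx does
 * above. Purely illustrative userspace code. */
#include <assert.h>
#include <stdio.h>

struct txq { int qlen; };

int main(void)
{
        struct txq tx[8] = { { 0 } };
        struct txq *queue = &tx[5];     /* pretend this arrived via a callback */
        unsigned int i = queue - tx;    /* pointer difference == array index */

        assert(i < sizeof(tx) / sizeof(tx[0]));
        printf("index = %u\n", i);      /* prints 5 */
        return 0;
}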
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 4425148d2b51..467f326126e0 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -221,5 +221,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
out:
spin_unlock_bh(&fastopenq->lock);
sock_put(lsk);
- return;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 048dc8d183aa..fc122fdb266a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1121,56 +1121,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx = 0, s_idx;
- struct net_device *dev;
- struct hlist_head *head;
- struct nlattr *tb[IFLA_MAX+1];
- u32 ext_filter_mask = 0;
-
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
- rcu_read_lock();
- cb->seq = net->dev_base_seq;
-
- if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
- ifla_policy) >= 0) {
-
- if (tb[IFLA_EXT_MASK])
- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
- }
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0,
- NLM_F_MULTI,
- ext_filter_mask) <= 0)
- goto out;
-
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- }
-out:
- rcu_read_unlock();
- cb->args[1] = idx;
- cb->args[0] = h;
-
- return skb->len;
-}
-
-const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
[IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
@@ -1197,7 +1148,6 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
[IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
};
-EXPORT_SYMBOL(ifla_policy);
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_KIND] = { .type = NLA_STRING },
@@ -1235,6 +1185,61 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
[IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
};
+static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int h, s_h;
+ int idx = 0, s_idx;
+ struct net_device *dev;
+ struct hlist_head *head;
+ struct nlattr *tb[IFLA_MAX+1];
+ u32 ext_filter_mask = 0;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+
+ rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
+ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ }
+
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, 0,
+ NLM_F_MULTI,
+ ext_filter_mask) <= 0)
+ goto out;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+ idx++;
+ }
+ }
+out:
+ rcu_read_unlock();
+ cb->args[1] = idx;
+ cb->args[0] = h;
+
+ return skb->len;
+}
+
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
+{
+ return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
+}
+EXPORT_SYMBOL(rtnl_nla_parse_ifla);
+
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
struct net *net;
@@ -1963,16 +1968,21 @@ replay:
dev->ifindex = ifm->ifi_index;
- if (ops->newlink)
+ if (ops->newlink) {
err = ops->newlink(net, dev, tb, data);
- else
+ /* Drivers should call free_netdev() in ->destructor
+ * and unregister it on failure so that device could be
+ * finally freed in rtnl_unlock.
+ */
+ if (err < 0)
+ goto out;
+ } else {
err = register_netdevice(dev);
-
- if (err < 0) {
- free_netdev(dev);
- goto out;
+ if (err < 0) {
+ free_netdev(dev);
+ goto out;
+ }
}
-
err = rtnl_configure_link(dev, ifm);
if (err < 0)
unregister_netdevice(dev);
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index c073b81a1f3e..62b5828acde0 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
#include "tfrc.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-static bool tfrc_debug;
+bool tfrc_debug;
module_param(tfrc_debug, bool, 0644);
MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
#endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index a3d8f7c76ae0..40ee7d62b652 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,6 +21,7 @@
#include "packet_history.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
+extern bool tfrc_debug;
#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a)
#else
#define tfrc_pr_debug(format, a...)
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index cac505f166d5..e5302b7f7ca9 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -209,7 +209,7 @@ static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr_priv,
/* Address substitution (IEC62439-3 pp 26, 50): replace mac
* address of outgoing frame with that of the outgoing slave's.
*/
- memcpy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr);
return dev_queue_xmit(skb);
}
@@ -346,7 +346,7 @@ static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
/* Payload: MacAddressA */
hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
- memcpy(hsr_sp->MacAddressA, hsr_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr);
dev_queue_xmit(skb);
return;
@@ -493,7 +493,7 @@ static int check_slave_ok(struct net_device *dev)
/* Default multicast address for HSR Supervision frames */
-static const unsigned char def_multicast_addr[ETH_ALEN] = {
+static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};
@@ -519,7 +519,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
hsr_priv->announce_timer.function = hsr_announce;
hsr_priv->announce_timer.data = (unsigned long) hsr_priv;
- memcpy(hsr_priv->sup_multicast_addr, def_multicast_addr, ETH_ALEN);
+ ether_addr_copy(hsr_priv->sup_multicast_addr, def_multicast_addr);
hsr_priv->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
/* FIXME: should I modify the value of these?
@@ -547,7 +547,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
/* Set hsr_dev's MAC address to that of mac_slave1 */
- memcpy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr);
/* Set required header length */
for (i = 0; i < HSR_MAX_SLAVE; i++) {
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 327060c6c874..3d0100f0bae3 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -108,8 +108,8 @@ int hsr_create_self_node(struct list_head *self_node_db,
if (!node)
return -ENOMEM;
- memcpy(node->MacAddressA, addr_a, ETH_ALEN);
- memcpy(node->MacAddressB, addr_b, ETH_ALEN);
+ ether_addr_copy(node->MacAddressA, addr_a);
+ ether_addr_copy(node->MacAddressB, addr_b);
rcu_read_lock();
oldnode = list_first_or_null_rcu(self_node_db,
@@ -199,7 +199,7 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
/* Node is known, but frame was received from an unknown
* address. Node is PICS_SUBS capable; merge its AddrB.
*/
- memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+ ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
node->AddrB_if = dev_idx;
return node;
}
@@ -208,8 +208,8 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
if (!node)
return NULL;
- memcpy(node->MacAddressA, hsr_sp->MacAddressA, ETH_ALEN);
- memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+ ether_addr_copy(node->MacAddressA, hsr_sp->MacAddressA);
+ ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source))
node->AddrB_if = dev_idx;
else
@@ -250,7 +250,7 @@ void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
rcu_read_lock();
node = find_node_by_AddrB(&hsr_priv->node_db, ethhdr->h_source);
if (node)
- memcpy(ethhdr->h_source, node->MacAddressA, ETH_ALEN);
+ ether_addr_copy(ethhdr->h_source, node->MacAddressA);
rcu_read_unlock();
}
@@ -272,7 +272,7 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
rcu_read_lock();
node = find_node_by_AddrA(&hsr_priv->node_db, ethhdr->h_dest);
if (node && (node->AddrB_if == dev_idx))
- memcpy(ethhdr->h_dest, node->MacAddressB, ETH_ALEN);
+ ether_addr_copy(ethhdr->h_dest, node->MacAddressB);
rcu_read_unlock();
}
@@ -428,13 +428,13 @@ void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
node = list_first_or_null_rcu(&hsr_priv->node_db,
struct node_entry, mac_list);
if (node)
- memcpy(addr, node->MacAddressA, ETH_ALEN);
+ ether_addr_copy(addr, node->MacAddressA);
return node;
}
node = _pos;
list_for_each_entry_continue_rcu(node, &hsr_priv->node_db, mac_list) {
- memcpy(addr, node->MacAddressA, ETH_ALEN);
+ ether_addr_copy(addr, node->MacAddressA);
return node;
}
@@ -462,7 +462,7 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
return -ENOENT; /* No such entry */
}
- memcpy(addr_b, node->MacAddressB, ETH_ALEN);
+ ether_addr_copy(addr_b, node->MacAddressB);
tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A];
if (node->time_in_stale[HSR_DEV_SLAVE_A])
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index af68dd83a4e3..10010c543edf 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -138,8 +138,8 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
break;
if (dev == hsr_priv->slave[0])
- memcpy(hsr_priv->dev->dev_addr,
- hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_priv->dev->dev_addr,
+ hsr_priv->slave[0]->dev_addr);
/* Make sure we recognize frames from ourselves in hsr_rcv() */
res = hsr_create_self_node(&hsr_priv->self_node_db,
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
index cee4425b9956..6cbc8965be91 100644
--- a/net/ieee802154/ieee802154.h
+++ b/net/ieee802154/ieee802154.h
@@ -53,6 +53,7 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_set_phyparams(struct sk_buff *skb, struct genl_info *info);
enum ieee802154_mcgrp_ids {
IEEE802154_COORD_MCGRP,
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 43f1b2bf469f..67c151bf4b91 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -115,6 +115,7 @@ static const struct genl_ops ieee8021154_ops[] = {
ieee802154_dump_phy),
IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
+ IEEE802154_OP(IEEE802154_SET_PHYPARAMS, ieee802154_set_phyparams),
/* see nl-mac.c */
IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 89b265aea151..222310a07762 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -55,7 +55,15 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
mutex_lock(&phy->pib_lock);
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
- nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
+ nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel) ||
+ nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, phy->transmit_power) ||
+ nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, phy->lbt) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, phy->cca_mode) ||
+ nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL, phy->cca_ed_level) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES, phy->csma_retries) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE, phy->min_be) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE, phy->max_be) ||
+ nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES, phy->frame_retries))
goto nla_put_failure;
for (i = 0; i < 32; i++) {
if (phy->channels_supported[i])
@@ -354,3 +362,193 @@ out_dev:
return rc;
}
+
+static int phy_set_txpower(struct wpan_phy *phy, struct genl_info *info)
+{
+ int txpower = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+ int rc;
+
+ rc = phy->set_txpower(phy, txpower);
+ if (rc < 0)
+ return rc;
+
+ phy->transmit_power = txpower;
+
+ return 0;
+}
+
+static int phy_set_lbt(struct wpan_phy *phy, struct genl_info *info)
+{
+ u8 on = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
+ int rc;
+
+ rc = phy->set_lbt(phy, on);
+ if (rc < 0)
+ return rc;
+
+ phy->lbt = on;
+
+ return 0;
+}
+
+static int phy_set_cca_mode(struct wpan_phy *phy, struct genl_info *info)
+{
+ u8 mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
+ int rc;
+
+ if (mode > 3)
+ return -EINVAL;
+
+ rc = phy->set_cca_mode(phy, mode);
+ if (rc < 0)
+ return rc;
+
+ phy->cca_mode = mode;
+
+ return 0;
+}
+
+static int phy_set_cca_ed_level(struct wpan_phy *phy, struct genl_info *info)
+{
+ s32 level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+ int rc;
+
+ rc = phy->set_cca_ed_level(phy, level);
+ if (rc < 0)
+ return rc;
+
+ phy->cca_ed_level = level;
+
+ return 0;
+}
+
+static int phy_set_csma_params(struct wpan_phy *phy, struct genl_info *info)
+{
+ int rc;
+ u8 min_be = phy->min_be;
+ u8 max_be = phy->max_be;
+ u8 retries = phy->csma_retries;
+
+ if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
+ retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
+ if (info->attrs[IEEE802154_ATTR_CSMA_MIN_BE])
+ min_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MIN_BE]);
+ if (info->attrs[IEEE802154_ATTR_CSMA_MAX_BE])
+ max_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]);
+
+ if (retries > 5 || max_be < 3 || max_be > 8 || min_be > max_be)
+ return -EINVAL;
+
+ rc = phy->set_csma_params(phy, min_be, max_be, retries);
+ if (rc < 0)
+ return rc;
+
+ phy->min_be = min_be;
+ phy->max_be = max_be;
+ phy->csma_retries = retries;
+
+ return 0;
+}
+
+static int phy_set_frame_retries(struct wpan_phy *phy, struct genl_info *info)
+{
+ s8 retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]);
+ int rc;
+
+ if (retries < -1 || retries > 7)
+ return -EINVAL;
+
+ rc = phy->set_frame_retries(phy, retries);
+ if (rc < 0)
+ return rc;
+
+ phy->frame_retries = retries;
+
+ return 0;
+}
+
+int ieee802154_set_phyparams(struct sk_buff *skb, struct genl_info *info)
+{
+ struct wpan_phy *phy;
+ const char *name;
+ int rc = -ENOTSUPP;
+
+ pr_debug("%s\n", __func__);
+
+ if (!info->attrs[IEEE802154_ATTR_PHY_NAME] &&
+ !info->attrs[IEEE802154_ATTR_LBT_ENABLED] &&
+ !info->attrs[IEEE802154_ATTR_CCA_MODE] &&
+ !info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL] &&
+ !info->attrs[IEEE802154_ATTR_CSMA_RETRIES] &&
+ !info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] &&
+ !info->attrs[IEEE802154_ATTR_CSMA_MAX_BE] &&
+ !info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
+ if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
+ return -EINVAL; /* phy name should be null-terminated */
+
+ phy = wpan_phy_find(name);
+ if (!phy)
+ return -ENODEV;
+
+ if ((!phy->set_txpower && info->attrs[IEEE802154_ATTR_TXPOWER]) ||
+ (!phy->set_lbt && info->attrs[IEEE802154_ATTR_LBT_ENABLED]) ||
+ (!phy->set_cca_mode && info->attrs[IEEE802154_ATTR_CCA_MODE]) ||
+ (!phy->set_cca_ed_level &&
+ info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]))
+ goto out;
+
+ mutex_lock(&phy->pib_lock);
+
+ if (info->attrs[IEEE802154_ATTR_TXPOWER]) {
+ rc = phy_set_txpower(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_LBT_ENABLED]) {
+ rc = phy_set_lbt(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_CCA_MODE]) {
+ rc = phy_set_cca_mode(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) {
+ rc = phy_set_cca_ed_level(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES] ||
+ info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] ||
+ info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]) {
+ rc = phy_set_csma_params(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_FRAME_RETRIES]) {
+ rc = phy_set_frame_retries(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ mutex_unlock(&phy->pib_lock);
+
+ wpan_phy_put(phy);
+
+ return 0;
+
+error:
+ mutex_unlock(&phy->pib_lock);
+out:
+ wpan_phy_put(phy);
+ return rc;
+}
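The bounds check in phy_set_csma_params() above mirrors the 802.15.4-2011 MAC PIB ranges (macMaxCSMABackoffs 0..5, macMaxBE 3..8, macMinBE 0..macMaxBE), which also match the defaults set in wpan-class.c further down. A small standalone sketch of just that validation; csma_params_valid() is an illustrative name, not a kernel symbol.

/* Userspace sketch of the CSMA bounds check used above. */
#include <stdbool.h>
#include <stdio.h>

static bool csma_params_valid(unsigned int retries, unsigned int min_be,
                              unsigned int max_be)
{
        return retries <= 5 && max_be >= 3 && max_be <= 8 && min_be <= max_be;
}

int main(void)
{
        printf("%d\n", csma_params_valid(4, 3, 5));     /* 1: the 2011 defaults */
        printf("%d\n", csma_params_valid(4, 6, 5));     /* 0: min_be > max_be */
        return 0;
}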
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 6adda4d46f95..fd7be5e45cef 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -52,5 +52,15 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
[IEEE802154_ATTR_ED_LIST] = { .len = 27 },
[IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, },
+
+ [IEEE802154_ATTR_TXPOWER] = { .type = NLA_S8, },
+ [IEEE802154_ATTR_LBT_ENABLED] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CCA_MODE] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
+ [IEEE802154_ATTR_CSMA_RETRIES] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CSMA_MIN_BE] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
+
+ [IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
};
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 4dd37615a749..edd0962d55f9 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -44,9 +44,7 @@ static DEVICE_ATTR_RO(name);
MASTER_SHOW(current_channel, "%d");
MASTER_SHOW(current_page, "%d");
-MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
- ((signed char) (phy->transmit_power << 2)) >> 2,
- (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1);
+MASTER_SHOW(transmit_power, "%d +- 1 dB");
MASTER_SHOW(cca_mode, "%d");
static ssize_t channels_supported_show(struct device *dev,
@@ -171,6 +169,12 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
phy->current_channel = -1; /* not initialised */
phy->current_page = 0; /* for compatibility */
+ /* defaults per 802.15.4-2011 */
+ phy->min_be = 3;
+ phy->max_be = 5;
+ phy->csma_retries = 4;
+ phy->frame_retries = -1; /* for compatibility, actual default is 3 */
+
return phy;
out:
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e9f1217a8afd..be8abe73bb9f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,71 @@
#include <net/route.h>
#include <net/xfrm.h>
+static bool ip_may_fragment(const struct sk_buff *skb)
+{
+ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+ !skb->local_df;
+}
+
+static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu || skb->local_df)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+ return true;
+}
+
+static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
+{
+ unsigned int mtu;
+
+ if (skb->local_df || !skb_is_gso(skb))
+ return false;
+
+ mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
+
+ /* if seglen > mtu, do software segmentation for IP fragmentation on
+ * output. DF bit cannot be set since ip_forward would have sent
+ * icmp error.
+ */
+ return skb_gso_network_seglen(skb) > mtu;
+}
+
+/* called if GSO skb needs to be fragmented on forward */
+static int ip_forward_finish_gso(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ netdev_features_t features;
+ struct sk_buff *segs;
+ int ret = 0;
+
+ features = netif_skb_dev_features(skb, dst->dev);
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR(segs)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ consume_skb(skb);
+
+ do {
+ struct sk_buff *nskb = segs->next;
+ int err;
+
+ segs->next = NULL;
+ err = dst_output(segs);
+
+ if (err && ret == 0)
+ ret = err;
+ segs = nskb;
+ } while (segs);
+
+ return ret;
+}
+
static int ip_forward_finish(struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
if (unlikely(opt->optlen))
ip_forward_options(skb);
+ if (ip_gso_exceeds_dst_mtu(skb))
+ return ip_forward_finish_gso(skb);
+
return dst_output(skb);
}
@@ -59,6 +127,10 @@ int ip_forward(struct sk_buff *skb)
struct rtable *rt; /* Route we use */
struct ip_options *opt = &(IPCB(skb)->opt);
+ /* that should never happen */
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
if (skb_warn_if_lro(skb))
goto drop;
@@ -68,9 +140,6 @@ int ip_forward(struct sk_buff *skb)
if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
return NET_RX_SUCCESS;
- if (skb->pkt_type != PACKET_HOST)
- goto drop;
-
skb_forward_csum(skb);
/*
@@ -91,8 +160,7 @@ int ip_forward(struct sk_buff *skb)
IPCB(skb)->flags |= IPSKB_FORWARDED;
mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
- if (unlikely(skb->len > mtu && !skb_is_gso(skb) &&
- (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+ if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
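ip_forward_finish_gso() above software-segments an oversized GSO skb, transmits each resulting segment, and reports the first error it saw. The list walk is the usual "detach, hand off, keep first error" pattern; a self-contained userspace sketch follows, where struct seg and emit() are illustrative stand-ins rather than kernel symbols.

/* Sketch of the segment-walk pattern used in ip_forward_finish_gso():
 * detach each node from a singly linked list, hand it off, remember the
 * first failure. Illustrative userspace code only. */
#include <stdio.h>

struct seg { int id; struct seg *next; };

static int emit(struct seg *s)
{
        printf("emit segment %d\n", s->id);
        return s->id == 2 ? -1 : 0;     /* pretend the second segment fails */
}

static int emit_all(struct seg *segs)
{
        int ret = 0;

        while (segs) {
                struct seg *next = segs->next;
                int err;

                segs->next = NULL;      /* detach before handing off */
                err = emit(segs);
                if (err && ret == 0)
                        ret = err;      /* keep only the first error */
                segs = next;
        }
        return ret;
}

int main(void)
{
        struct seg c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        return emit_all(&a) ? 1 : 0;
}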
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 580dd96666e0..0968b28c4cf3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -186,7 +186,8 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
}
EXPORT_SYMBOL(ip_cmsg_recv);
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
+int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+ bool allow_ipv6)
{
int err, val;
struct cmsghdr *cmsg;
@@ -194,6 +195,22 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
+#if defined(CONFIG_IPV6)
+ if (allow_ipv6 &&
+ cmsg->cmsg_level == SOL_IPV6 &&
+ cmsg->cmsg_type == IPV6_PKTINFO) {
+ struct in6_pktinfo *src_info;
+
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
+ return -EINVAL;
+ src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+ if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
+ return -EINVAL;
+ ipc->oif = src_info->ipi6_ifindex;
+ ipc->addr = src_info->ipi6_addr.s6_addr32[3];
+ continue;
+ }
+#endif
if (cmsg->cmsg_level != SOL_IP)
continue;
switch (cmsg->cmsg_type) {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 50228be5c17b..6d430ff2ba29 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -443,7 +443,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
fbt = netdev_priv(itn->fb_tunnel_dev);
dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
if (IS_ERR(dev))
- return NULL;
+ return ERR_CAST(dev);
dev->mtu = ip_tunnel_bind_dev(dev);
@@ -796,9 +796,13 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
- if (!t && (cmd == SIOCADDTUNNEL))
+ if (!t && (cmd == SIOCADDTUNNEL)) {
t = ip_tunnel_create(net, itn, p);
-
+ if (IS_ERR(t)) {
+ err = PTR_ERR(t);
+ break;
+ }
+ }
if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
if (t != NULL) {
if (t->dev != dev) {
@@ -825,8 +829,9 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
if (t) {
err = 0;
ip_tunnel_update(itn, t, dev, p, true);
- } else
- err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+ } else {
+ err = -ENOENT;
+ }
break;
case SIOCDELTUNNEL:
@@ -1041,19 +1046,13 @@ int ip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- int i, err;
+ int err;
dev->destructor = ip_tunnel_dev_free;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ipt_stats;
- ipt_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ipt_stats->syncp);
- }
-
tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
if (!tunnel->dst_cache) {
free_percpu(dev->tstats);
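Several hunks in this section (ip_tunnel, ip6_gre, ip6_tnl, vti6, sit) collapse the open-coded alloc_percpu() plus per-CPU u64_stats_init() loop into netdev_alloc_pcpu_stats(). Roughly, the helper is a macro along the following lines; this is a from-memory sketch, and the authoritative definition lives in include/linux/netdevice.h.

/* Approximate shape of netdev_alloc_pcpu_stats(): allocate per-CPU stats
 * and initialise each instance's syncp, exactly what the removed loops
 * did by hand. Sketch only; see include/linux/netdevice.h. */
#define netdev_alloc_pcpu_stats(type)                           \
({                                                              \
        typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
        if (pcpu_stats) {                                       \
                int __cpu;                                      \
                for_each_possible_cpu(__cpu) {                  \
                        typeof(type) *stat;                     \
                        stat = per_cpu_ptr(pcpu_stats, __cpu);  \
                        u64_stats_init(&stat->syncp);           \
                }                                               \
        }                                                       \
        pcpu_stats;                                             \
})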
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index efa1138fa523..b3e86ea7b71b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -273,7 +273,7 @@ static int __init ic_open_devs(void)
msleep(1);
- if time_before(jiffies, next_msg)
+ if (time_before(jiffies, next_msg))
continue;
elapsed = jiffies_to_msecs(jiffies - start);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2d11c094296e..f4b19e5dde54 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -727,7 +727,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
if (err)
return err;
if (ipc.opt)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index c04518f4850a..a9dbe58bdfe7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -524,7 +524,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
if (err)
goto out;
if (ipc.opt)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 25071b48921c..11e4384daaf9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -697,7 +697,6 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
out_unlock:
spin_unlock_bh(&fnhe_lock);
- return;
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
@@ -1597,6 +1596,7 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+ RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
@@ -1695,10 +1695,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.daddr = daddr;
fl4.saddr = saddr;
err = fib_lookup(net, &fl4, &res);
- if (err != 0)
+ if (err != 0) {
+ if (!IN_DEV_FORWARD(in_dev))
+ err = -EHOSTUNREACH;
goto no_route;
-
- RT_CACHE_STAT_INC(in_slow_tot);
+ }
if (res.type == RTN_BROADCAST)
goto brd_input;
@@ -1712,8 +1713,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto local_input;
}
- if (!IN_DEV_FORWARD(in_dev))
+ if (!IN_DEV_FORWARD(in_dev)) {
+ err = -EHOSTUNREACH;
goto no_route;
+ }
if (res.type != RTN_UNICAST)
goto martian_destination;
@@ -1768,6 +1771,7 @@ local_input:
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+ RT_CACHE_STAT_INC(in_slow_tot);
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
rth->dst.error= -err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9f3a2db9109e..bed379c7abcd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2794,6 +2794,11 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
info->tcpi_rcv_space = tp->rcvq_space.space;
info->tcpi_total_retrans = tp->total_retrans;
+
+ info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
+ sk->sk_pacing_rate : ~0ULL;
+ info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
+ sk->sk_max_pacing_rate : ~0ULL;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index ad37bf18ae4b..f49351edf97d 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -362,21 +362,12 @@ u32 tcp_reno_ssthresh(struct sock *sk)
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
-/* Lower bound on congestion window with halving. */
-u32 tcp_reno_min_cwnd(const struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- return tp->snd_ssthresh/2;
-}
-EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
-
struct tcp_congestion_ops tcp_reno = {
.flags = TCP_CONG_NON_RESTRICTED,
.name = "reno",
.owner = THIS_MODULE,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
};
/* Initial congestion control used (until SYN)
@@ -388,6 +379,5 @@ struct tcp_congestion_ops tcp_init_congestion_ops = {
.owner = THIS_MODULE,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 8ed9305dfdf4..8b9e7bad77c0 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -162,7 +162,6 @@ static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
.init = hstcp_init,
.ssthresh = hstcp_ssthresh,
.cong_avoid = hstcp_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.owner = THIS_MODULE,
.name = "highspeed"
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 478fe82611bf..2a1a9e2a4e51 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -166,7 +166,6 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
static struct tcp_congestion_ops tcp_hybla __read_mostly = {
.init = hybla_init,
.ssthresh = tcp_reno_ssthresh,
- .min_cwnd = tcp_reno_min_cwnd,
.cong_avoid = hybla_cong_avoid,
.set_state = hybla_state,
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index e498a62b8f97..be047c63ca10 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -328,7 +328,6 @@ static struct tcp_congestion_ops tcp_illinois __read_mostly = {
.flags = TCP_CONG_RTT_STAMP,
.init = tcp_illinois_init,
.ssthresh = tcp_illinois_ssthresh,
- .min_cwnd = tcp_reno_min_cwnd,
.cong_avoid = tcp_illinois_cong_avoid,
.set_state = tcp_illinois_state,
.get_info = tcp_illinois_info,
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 991d62a2f9bb..503798f2fcd6 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -319,7 +319,6 @@ static struct tcp_congestion_ops tcp_lp __read_mostly = {
.init = tcp_lp_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_lp_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.pkts_acked = tcp_lp_pkts_acked,
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3be16727f058..21e8a9f33287 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2071,7 +2071,6 @@ rearm_timer:
if (likely(!err))
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPLOSSPROBES);
- return;
}
/* Push out any pending frames which were held back due to
@@ -2169,7 +2168,8 @@ u32 __tcp_select_window(struct sock *sk)
*/
int mss = icsk->icsk_ack.rcv_mss;
int free_space = tcp_space(sk);
- int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
+ int allowed_space = tcp_full_space(sk);
+ int full_space = min_t(int, tp->window_clamp, allowed_space);
int window;
if (mss > full_space)
@@ -2182,7 +2182,19 @@ u32 __tcp_select_window(struct sock *sk)
tp->rcv_ssthresh = min(tp->rcv_ssthresh,
4U * tp->advmss);
- if (free_space < mss)
+ /* free_space might become our new window, make sure we don't
+ * increase it due to wscale.
+ */
+ free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
+
+ /* if free space is less than mss estimate, or is below 1/16th
+ * of the maximum allowed, try to move to zero-window, else
+ * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
+ * new incoming data is dropped due to memory limits.
+ * With large window, mss test triggers way too late in order
+ * to announce zero window in time before rmem limit kicks in.
+ */
+ if (free_space < (allowed_space >> 4) || free_space < mss)
return 0;
}
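The reworked __tcp_select_window() logic rounds free space down to the granularity the peer will see through the receive window scale, and moves to a zero window once free space drops below either one MSS or 1/16 of the maximum allowed space. A userspace sketch of that arithmetic; round_down_pow2() and announce_zero_window() are local stand-ins, not kernel symbols.

/* Sketch of the new zero-window test: round free space down to the
 * rcv_wscale granularity, then compare against mss and allowed_space/16. */
#include <stdio.h>

static unsigned int round_down_pow2(unsigned int x, unsigned int gran)
{
        return x & ~(gran - 1);         /* gran must be a power of two */
}

static int announce_zero_window(unsigned int free_space,
                                unsigned int allowed_space,
                                unsigned int mss, unsigned int rcv_wscale)
{
        free_space = round_down_pow2(free_space, 1U << rcv_wscale);
        return free_space < (allowed_space >> 4) || free_space < mss;
}

int main(void)
{
        /* e.g. a 6 MB receive buffer, wscale 7 (128-byte units), MSS 1460 */
        printf("%d\n", announce_zero_window(300000, 6291456, 1460, 7)); /* 1 */
        printf("%d\n", announce_zero_window(800000, 6291456, 1460, 7)); /* 0 */
        return 0;
}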
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 19ea6c2951f3..0ac50836da4d 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -39,7 +39,6 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
.ssthresh = tcp_scalable_ssthresh,
.cong_avoid = tcp_scalable_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.owner = THIS_MODULE,
.name = "scalable",
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 06cae62bf208..a022c17c9cf1 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -310,7 +310,6 @@ static struct tcp_congestion_ops tcp_vegas __read_mostly = {
.init = tcp_vegas_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_vegas_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.pkts_acked = tcp_vegas_pkts_acked,
.set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event,
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 76a1e23259e1..b94a04ae2ed5 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -276,7 +276,6 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
.init = tcp_westwood_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
- .min_cwnd = tcp_westwood_bw_rttmin,
.cwnd_event = tcp_westwood_event,
.get_info = tcp_westwood_info,
.pkts_acked = tcp_westwood_pkts_acked,
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 1a8d271f994d..8eab02030ed0 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -231,7 +231,6 @@ static struct tcp_congestion_ops tcp_yeah __read_mostly = {
.init = tcp_yeah_init,
.ssthresh = tcp_yeah_ssthresh,
.cong_avoid = tcp_yeah_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event,
.get_info = tcp_vegas_get_info,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 77bd16fa9f34..4468e1adc094 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -931,7 +931,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc,
+ sk->sk_family == AF_INET6);
if (err)
return err;
if (ipc.opt)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ad235690684c..fdbfeca36d63 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2783,6 +2783,8 @@ static void addrconf_gre_config(struct net_device *dev)
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
addrconf_add_linklocal(idev, &addr);
+ else
+ addrconf_prefix_route(&addr, 64, dev, 0, 0);
}
#endif
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index b30ad3741b46..731e1e1722d9 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -6,7 +6,7 @@
*/
/*
* Author:
- * YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org>
+ * YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org>
*/
#include <linux/kernel.h>
@@ -22,14 +22,13 @@
#if 0
#define ADDRLABEL(x...) printk(x)
#else
-#define ADDRLABEL(x...) do { ; } while(0)
+#define ADDRLABEL(x...) do { ; } while (0)
#endif
/*
* Policy Table
*/
-struct ip6addrlbl_entry
-{
+struct ip6addrlbl_entry {
#ifdef CONFIG_NET_NS
struct net *lbl_net;
#endif
@@ -88,39 +87,39 @@ static const __net_initconst struct ip6addrlbl_init_table
{ /* ::/0 */
.prefix = &in6addr_any,
.label = 1,
- },{ /* fc00::/7 */
- .prefix = &(struct in6_addr){{{ 0xfc }}},
+ }, { /* fc00::/7 */
+ .prefix = &(struct in6_addr){ { { 0xfc } } } ,
.prefixlen = 7,
.label = 5,
- },{ /* fec0::/10 */
- .prefix = &(struct in6_addr){{{ 0xfe, 0xc0 }}},
+ }, { /* fec0::/10 */
+ .prefix = &(struct in6_addr){ { { 0xfe, 0xc0 } } },
.prefixlen = 10,
.label = 11,
- },{ /* 2002::/16 */
- .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}},
+ }, { /* 2002::/16 */
+ .prefix = &(struct in6_addr){ { { 0x20, 0x02 } } },
.prefixlen = 16,
.label = 2,
- },{ /* 3ffe::/16 */
- .prefix = &(struct in6_addr){{{ 0x3f, 0xfe }}},
+ }, { /* 3ffe::/16 */
+ .prefix = &(struct in6_addr){ { { 0x3f, 0xfe } } },
.prefixlen = 16,
.label = 12,
- },{ /* 2001::/32 */
- .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
+ }, { /* 2001::/32 */
+ .prefix = &(struct in6_addr){ { { 0x20, 0x01 } } },
.prefixlen = 32,
.label = 6,
- },{ /* 2001:10::/28 */
- .prefix = &(struct in6_addr){{{ 0x20, 0x01, 0x00, 0x10 }}},
+ }, { /* 2001:10::/28 */
+ .prefix = &(struct in6_addr){ { { 0x20, 0x01, 0x00, 0x10 } } },
.prefixlen = 28,
.label = 7,
- },{ /* ::ffff:0:0 */
- .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}},
+ }, { /* ::ffff:0:0 */
+ .prefix = &(struct in6_addr){ { { [10] = 0xff, [11] = 0xff } } },
.prefixlen = 96,
.label = 4,
- },{ /* ::/96 */
+ }, { /* ::/96 */
.prefix = &in6addr_any,
.prefixlen = 96,
.label = 3,
- },{ /* ::1/128 */
+ }, { /* ::1/128 */
.prefix = &in6addr_loopback,
.prefixlen = 128,
.label = 0,
@@ -441,7 +440,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh)
if (label == IPV6_ADDR_LABEL_DEFAULT)
return -EINVAL;
- switch(nlh->nlmsg_type) {
+ switch (nlh->nlmsg_type) {
case RTM_NEWADDRLABEL:
if (ifal->ifal_index &&
!__dev_get_by_index(net, ifal->ifal_index))
@@ -505,12 +504,13 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
if (idx >= s_idx &&
net_eq(ip6addrlbl_net(p), net)) {
- if ((err = ip6addrlbl_fill(skb, p,
- ip6addrlbl_table.seq,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWADDRLABEL,
- NLM_F_MULTI)) <= 0)
+ err = ip6addrlbl_fill(skb, p,
+ ip6addrlbl_table.seq,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWADDRLABEL,
+ NLM_F_MULTI);
+ if (err <= 0)
break;
}
idx++;
@@ -527,7 +527,7 @@ static inline int ip6addrlbl_msgsize(void)
+ nla_total_size(4); /* IFAL_LABEL */
}
-static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
+static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(in_skb->sk);
struct ifaddrlblmsg *ifal;
@@ -568,7 +568,8 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
goto out;
}
- if (!(skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL))) {
+ skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL);
+ if (!skb) {
ip6addrlbl_put(p);
return -ENOBUFS;
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index dfa41bb4e0dc..0961b5ef866d 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -15,9 +15,7 @@
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
-#include <linux/if_arp.h>
#include <linux/in6.h>
-#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -28,12 +26,8 @@
#include <net/sock.h>
#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/protocol.h>
-#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
-#include <net/icmp.h>
#include <net/transp_v6.h>
#include <asm/uaccess.h>
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f3ffb43f59c0..c98338b81d30 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1454,7 +1454,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
static int ip6gre_tap_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
- int i;
tunnel = netdev_priv(dev);
@@ -1464,16 +1463,10 @@ static int ip6gre_tap_init(struct net_device *dev)
ip6gre_tnl_link_config(tunnel, 1);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ip6gre_tap_stats;
- ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ip6gre_tap_stats->syncp);
- }
-
return 0;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef02b26ccf81..070a2fae2375 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
return mtu;
}
+static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu || skb->local_df)
+ return false;
+
+ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
+ return true;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+ return true;
+}
+
int ip6_forward(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb)
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
- (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
+ if (ip6_pkt_too_big(skb, mtu)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
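ip6_pkt_too_big() folds the previous two-part condition into one predicate: a packet that exceeds the MTU is still forwardable if local_df is set, or if it is GSO and its individual segments fit, unless frag_max_size already says otherwise. A self-contained sketch of the same decision table, with plain values standing in for the skb fields:

/* Userspace sketch of the ip6_pkt_too_big() predicate added above. */
#include <stdbool.h>
#include <stdio.h>

static bool pkt_too_big(unsigned int len, unsigned int mtu, bool local_df,
                        unsigned int frag_max_size, bool gso,
                        unsigned int gso_seglen)
{
        if (len <= mtu || local_df)
                return false;
        if (frag_max_size && frag_max_size > mtu)
                return true;
        if (gso && gso_seglen <= mtu)
                return false;
        return true;
}

int main(void)
{
        /* 64 KB GSO packet whose segments fit a 1500-byte MTU: forwardable */
        printf("%d\n", pkt_too_big(65000, 1500, false, 0, true, 1438));  /* 0 */
        /* same size without GSO: must trigger ICMPV6_PKT_TOOBIG */
        printf("%d\n", pkt_too_big(65000, 1500, false, 0, false, 0));    /* 1 */
        return 0;
}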
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5db8d310f9c0..8ad59f4811df 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1502,19 +1502,12 @@ static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- int i;
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ip6_tnl_stats;
- ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ip6_tnl_stats->syncp);
- }
return 0;
}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 2d19272b8cee..864914399391 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -731,18 +731,12 @@ static void vti6_dev_setup(struct net_device *dev)
static inline int vti6_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- int i;
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *stats;
- stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&stats->syncp);
- }
return 0;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3dfbcf1dcb1c..958027be0e94 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1356,7 +1356,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
static int ipip6_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int i;
tunnel->dev = dev;
tunnel->net = dev_net(dev);
@@ -1365,16 +1364,10 @@ static int ipip6_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip6_tunnel_bind_dev(dev);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ipip6_tunnel_stats;
- ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ipip6_tunnel_stats->syncp);
- }
-
return 0;
}
@@ -1384,7 +1377,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
struct iphdr *iph = &tunnel->parms.iph;
struct net *net = dev_net(dev);
struct sit_net *sitn = net_generic(net, sit_net_id);
- int i;
tunnel->dev = dev;
tunnel->net = dev_net(dev);
@@ -1395,16 +1387,10 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
iph->ttl = 64;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ipip6_fb_stats;
- ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ipip6_fb_stats->syncp);
- }
-
dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 00b2a6d1c009..41e4e93cb3aa 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1368,6 +1368,7 @@ static int ipx_release(struct socket *sock)
goto out;
lock_sock(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
@@ -1791,8 +1792,11 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &rc);
- if (!skb)
+ if (!skb) {
+ if (rc == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN))
+ rc = 0;
goto out;
+ }
ipx = ipx_hdr(skb);
copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
@@ -1922,6 +1926,26 @@ static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long
}
#endif
+static int ipx_shutdown(struct socket *sock, int mode)
+{
+ struct sock *sk = sock->sk;
+
+ if (mode < SHUT_RD || mode > SHUT_RDWR)
+ return -EINVAL;
+ /* This maps:
+ * SHUT_RD (0) -> RCV_SHUTDOWN (1)
+ * SHUT_WR (1) -> SEND_SHUTDOWN (2)
+ * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
+ */
+ ++mode;
+
+ lock_sock(sk);
+ sk->sk_shutdown |= mode;
+ release_sock(sk);
+ sk->sk_state_change(sk);
+
+ return 0;
+}
/*
* Socket family declarations
@@ -1948,7 +1972,7 @@ static const struct proto_ops ipx_dgram_ops = {
.compat_ioctl = ipx_compat_ioctl,
#endif
.listen = sock_no_listen,
- .shutdown = sock_no_shutdown, /* FIXME: support shutdown */
+ .shutdown = ipx_shutdown,
.setsockopt = ipx_setsockopt,
.getsockopt = ipx_getsockopt,
.sendmsg = ipx_sendmsg,
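The new ipx_shutdown() exploits the fact that the userspace SHUT_RD/SHUT_WR/SHUT_RDWR values (0/1/2) plus one line up exactly with the kernel's RCV_SHUTDOWN/SEND_SHUTDOWN bit mask (1/2/3), as the comment in the hunk spells out. A tiny illustrative sketch of that mapping:

/* SHUT_* -> shutdown-mask mapping used by ipx_shutdown():
 * SHUT_RD(0)+1 = 1 (receive bit), SHUT_WR(1)+1 = 2 (send bit),
 * SHUT_RDWR(2)+1 = 3 (both). Illustrative userspace code. */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int modes[] = { SHUT_RD, SHUT_WR, SHUT_RDWR };
        unsigned int i;

        for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
                int mask = modes[i] + 1;

                printf("mode %d -> recv:%d send:%d\n",
                       modes[i], mask & 1, (mask & 2) >> 1);
        }
        return 0;
}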
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 735d0f60c83a..e5dc42f0e527 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1809,8 +1809,6 @@ void l2tp_session_free(struct l2tp_session *session)
}
kfree(session);
-
- return;
}
EXPORT_SYMBOL_GPL(l2tp_session_free);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index be5fadf34739..ec40bc344be6 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -454,13 +454,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-
if (sock) {
inet_shutdown(sock, 2);
/* Don't let the session go away before our socket does */
l2tp_session_inc_refcount(session);
}
- return;
}
/* Really kill the session socket. (Called from sock_put() if
@@ -474,7 +472,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
l2tp_session_dec_refcount(session);
}
- return;
}
/* Called when the PPPoX socket (session) is closed.
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 13b7683de5a4..ce9633a3cfb0 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -107,7 +107,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
- ieee80211_tx_skb_tid(sdata, skb, tid);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 453e974287d1..363d19b5d5c8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -451,11 +451,11 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
- if (sta->last_rx_rate_flag & RX_FLAG_80MHZ)
+ if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
- if (sta->last_rx_rate_flag & RX_FLAG_80P80MHZ)
+ if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80P80MHZ)
rinfo->flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
- if (sta->last_rx_rate_flag & RX_FLAG_160MHZ)
+ if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
}
@@ -970,9 +970,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
/* TODO: make hostapd tell us what it wants */
sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = sdata->local->rx_chains;
- sdata->radar_required = params->radar_required;
mutex_lock(&local->mtx);
+ sdata->radar_required = params->radar_required;
err = ieee80211_vif_use_channel(sdata, &params->chandef,
IEEE80211_CHANCTX_SHARED);
mutex_unlock(&local->mtx);
@@ -1056,6 +1056,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
int err;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ sdata_assert_lock(sdata);
/* don't allow changing the beacon while CSA is in place - offset
* of channel switch counter may change
@@ -1083,6 +1084,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
struct probe_resp *old_probe_resp;
struct cfg80211_chan_def chandef;
+ sdata_assert_lock(sdata);
+
old_beacon = sdata_dereference(sdata->u.ap.beacon, sdata);
if (!old_beacon)
return -ENOENT;
@@ -1343,6 +1346,18 @@ static int sta_apply_parameters(struct ieee80211_local *local,
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
params->vht_capa, sta);
+ if (params->opmode_notif_used) {
+ enum ieee80211_band band =
+ ieee80211_get_sdata_band(sdata);
+
+ /* returned value is only needed for rc update, but the
+ * rc isn't initialized here yet, so ignore it
+ */
+ __ieee80211_vht_handle_opmode(sdata, sta,
+ params->opmode_notif,
+ band, false);
+ }
+
if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
u32 changed = 0;
@@ -2630,6 +2645,18 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
if (!roc)
return -ENOMEM;
+ /*
+ * If the duration is zero, then the driver
+ * wouldn't actually do anything. Set it to
+ * 10 for now.
+ *
+ * TODO: cancel the off-channel operation
+ * when we get the SKB's TX status and
+ * the wait time was zero before.
+ */
+ if (!duration)
+ duration = 10;
+
roc->chan = channel;
roc->duration = duration;
roc->req_duration = duration;
@@ -2671,18 +2698,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* otherwise actually kick it off here (for error handling) */
- /*
- * If the duration is zero, then the driver
- * wouldn't actually do anything. Set it to
- * 10 for now.
- *
- * TODO: cancel the off-channel operation
- * when we get the SKB's TX status and
- * the wait time was zero before.
- */
- if (!duration)
- duration = 10;
-
ret = drv_remain_on_channel(local, sdata, channel, duration, type);
if (ret) {
kfree(roc);
@@ -2990,69 +3005,88 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
return new_beacon;
}
-void ieee80211_csa_finalize_work(struct work_struct *work)
+void ieee80211_csa_finish(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->csa_finalize_work);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+
+static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
- csa_finalize_work);
struct ieee80211_local *local = sdata->local;
int err, changed = 0;
- sdata_lock(sdata);
- /* AP might have been stopped while waiting for the lock. */
- if (!sdata->vif.csa_active)
- goto unlock;
-
- if (!ieee80211_sdata_running(sdata))
- goto unlock;
+ sdata_assert_lock(sdata);
- sdata->radar_required = sdata->csa_radar_required;
mutex_lock(&local->mtx);
+ sdata->radar_required = sdata->csa_radar_required;
err = ieee80211_vif_change_channel(sdata, &changed);
mutex_unlock(&local->mtx);
if (WARN_ON(err < 0))
- goto unlock;
+ return;
if (!local->use_chanctx) {
local->_oper_chandef = sdata->csa_chandef;
ieee80211_hw_config(local, 0);
}
- ieee80211_bss_info_change_notify(sdata, changed);
-
sdata->vif.csa_active = false;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
- if (err < 0)
- goto unlock;
-
- changed |= err;
kfree(sdata->u.ap.next_beacon);
sdata->u.ap.next_beacon = NULL;
- ieee80211_bss_info_change_notify(sdata, err);
+ if (err < 0)
+ return;
+ changed |= err;
break;
case NL80211_IFTYPE_ADHOC:
- ieee80211_ibss_finish_csa(sdata);
+ err = ieee80211_ibss_finish_csa(sdata);
+ if (err < 0)
+ return;
+ changed |= err;
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
err = ieee80211_mesh_finish_csa(sdata);
if (err < 0)
- goto unlock;
+ return;
+ changed |= err;
break;
#endif
default:
WARN_ON(1);
- goto unlock;
+ return;
}
+ ieee80211_bss_info_change_notify(sdata, changed);
+
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_CSA);
cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
+}
+
+void ieee80211_csa_finalize_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ csa_finalize_work);
+
+ sdata_lock(sdata);
+ /* AP might have been stopped while waiting for the lock. */
+ if (!sdata->vif.csa_active)
+ goto unlock;
+
+ if (!ieee80211_sdata_running(sdata))
+ goto unlock;
+
+ ieee80211_csa_finalize(sdata);
unlock:
sdata_unlock(sdata);
@@ -3066,9 +3100,9 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_chanctx *chanctx;
struct ieee80211_if_mesh __maybe_unused *ifmsh;
- int err, num_chanctx;
+ int err, num_chanctx, changed = 0;
- lockdep_assert_held(&sdata->wdev.mtx);
+ sdata_assert_lock(sdata);
if (!list_empty(&local->roc_list) || local->scanning)
return -EBUSY;
@@ -3107,19 +3141,40 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->csa_counter_offset_beacon =
- params->counter_offset_beacon;
- sdata->csa_counter_offset_presp = params->counter_offset_presp;
sdata->u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_after);
if (!sdata->u.ap.next_beacon)
return -ENOMEM;
+ /*
+ * With a count of 0, we don't have to wait for any
+ * TBTT before switching, so complete the CSA
+ * immediately. In theory, with a count == 1 we
+ * should delay the switch until just before the next
+ * TBTT, but that would complicate things so we switch
+ * immediately too. If we would delay the switch
+ * until the next TBTT, we would have to set the probe
+ * response here.
+ *
+ * TODO: A channel switch with count <= 1 without
+ * sending a CSA action frame is kind of useless,
+ * because the clients won't know we're changing
+ * channels. The action frame must be implemented
+ * either here or in the userspace.
+ */
+ if (params->count <= 1)
+ break;
+
+ sdata->csa_counter_offset_beacon =
+ params->counter_offset_beacon;
+ sdata->csa_counter_offset_presp = params->counter_offset_presp;
err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
if (err < 0) {
kfree(sdata->u.ap.next_beacon);
return err;
}
+ changed |= err;
+
break;
case NL80211_IFTYPE_ADHOC:
if (!sdata->vif.bss_conf.ibss_joined)
@@ -3147,17 +3202,21 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->chandef.chan->band)
return -EINVAL;
- err = ieee80211_ibss_csa_beacon(sdata, params);
- if (err < 0)
- return err;
+ /* see comments in the NL80211_IFTYPE_AP block */
+ if (params->count > 1) {
+ err = ieee80211_ibss_csa_beacon(sdata, params);
+ if (err < 0)
+ return err;
+ changed |= err;
+ }
+
+ ieee80211_send_action_csa(sdata, params);
+
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
ifmsh = &sdata->u.mesh;
- if (!ifmsh->mesh_id)
- return -EINVAL;
-
if (params->chandef.width != sdata->vif.bss_conf.chandef.width)
return -EINVAL;
@@ -3166,17 +3225,27 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->chandef.chan->band)
return -EINVAL;
- ifmsh->chsw_init = true;
- if (!ifmsh->pre_value)
- ifmsh->pre_value = 1;
- else
- ifmsh->pre_value++;
+ if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_NONE) {
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_INIT;
+ if (!ifmsh->pre_value)
+ ifmsh->pre_value = 1;
+ else
+ ifmsh->pre_value++;
+ }
- err = ieee80211_mesh_csa_beacon(sdata, params, true);
- if (err < 0) {
- ifmsh->chsw_init = false;
- return err;
+ /* see comments in the NL80211_IFTYPE_AP block */
+ if (params->count > 1) {
+ err = ieee80211_mesh_csa_beacon(sdata, params);
+ if (err < 0) {
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+ return err;
+ }
+ changed |= err;
}
+
+ if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT)
+ ieee80211_send_action_csa(sdata, params);
+
break;
#endif
default:
@@ -3193,8 +3262,13 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
sdata->csa_chandef = params->chandef;
sdata->vif.csa_active = true;
- ieee80211_bss_info_change_notify(sdata, err);
- drv_channel_switch_beacon(sdata, &params->chandef);
+ if (changed) {
+ ieee80211_bss_info_change_notify(sdata, changed);
+ drv_channel_switch_beacon(sdata, &params->chandef);
+ } else {
+ /* if the beacon didn't change, we can finalize immediately */
+ ieee80211_csa_finalize(sdata);
+ }
return 0;
}
@@ -3865,7 +3939,7 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
return 0;
}
-struct cfg80211_ops mac80211_config_ops = {
+const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
.change_virtual_intf = ieee80211_change_iface,
diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
index 7d7879f5b00b..2d51f62dc76c 100644
--- a/net/mac80211/cfg.h
+++ b/net/mac80211/cfg.h
@@ -4,6 +4,6 @@
#ifndef __CFG_H
#define __CFG_H
-extern struct cfg80211_ops mac80211_config_ops;
+extern const struct cfg80211_ops mac80211_config_ops;
#endif /* __CFG_H */
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f43613a97dd6..42c659229a09 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -196,6 +196,8 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
+ lockdep_assert_held(&local->mtx);
+
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (sdata->radar_required) {
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 80194b557a0c..2ecb4deddb5d 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -195,7 +195,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
- char _buf[12], *buf = _buf;
+ char _buf[12] = {}, *buf = _buf;
struct sta_info *sta = file->private_data;
bool start, tx;
unsigned long tid;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 70dd013de836..afbe2b203c3e 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -375,7 +375,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.delba.params = cpu_to_le16(params);
mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
- ieee80211_tx_skb_tid(sdata, skb, tid);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 2796a198728f..4453e2725e40 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -220,7 +220,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
struct ieee80211_mgmt *mgmt;
struct cfg80211_bss *bss;
u32 bss_change;
@@ -294,7 +293,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
if (ieee80211_vif_use_channel(sdata, &chandef,
ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
@@ -303,12 +301,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->mtx);
return;
}
+ sdata->radar_required = radar_required;
mutex_unlock(&local->mtx);
memcpy(ifibss->bssid, bssid, ETH_ALEN);
- sband = local->hw.wiphy->bands[chan->band];
-
presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
capability, tsf, &chandef,
&have_higher_than_11mbit, NULL);
@@ -318,7 +315,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(ifibss->presp, presp);
mgmt = (void *)presp->head;
- sdata->radar_required = radar_required;
sdata->vif.bss_conf.enable_beacon = true;
sdata->vif.bss_conf.beacon_int = beacon_int;
sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -386,7 +382,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
presp->head_len, 0, GFP_KERNEL);
cfg80211_put_bss(local->hw.wiphy, bss);
netif_carrier_on(sdata->dev);
- cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
}
static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
@@ -521,12 +517,6 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
if (old_presp)
kfree_rcu(old_presp, rcu_head);
- /* it might not send the beacon for a while. send an action frame
- * immediately to announce the channel switch.
- */
- if (csa_settings)
- ieee80211_send_action_csa(sdata, csa_settings);
-
return BSS_CHANGED_BEACON;
out:
return ret;
@@ -536,7 +526,7 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct cfg80211_bss *cbss;
- int err;
+ int err, changed = 0;
u16 capability;
sdata_assert_lock(sdata);
@@ -568,10 +558,9 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
if (err < 0)
return err;
- if (err)
- ieee80211_bss_info_change_notify(sdata, err);
+ changed |= err;
- return 0;
+ return changed;
}
void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
@@ -799,6 +788,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
int err;
u32 sta_flags;
+ sdata_assert_lock(sdata);
+
sta_flags = IEEE80211_STA_DISABLE_VHT;
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_5:
@@ -1468,6 +1459,11 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN);
ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+	/* avoid excessive retries for probe requests to wildcard SSIDs */
+ if (pos[1] == 0)
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK;
+
ieee80211_tx_skb(sdata, skb);
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3701930c6649..0014b5396ce5 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -616,7 +616,11 @@ struct ieee80211_if_mesh {
struct ps_data ps;
/* Channel Switching Support */
struct mesh_csa_settings __rcu *csa;
- bool chsw_init;
+ enum {
+ IEEE80211_MESH_CSA_ROLE_NONE,
+ IEEE80211_MESH_CSA_ROLE_INIT,
+ IEEE80211_MESH_CSA_ROLE_REPEATER,
+ } csa_role;
u8 chsw_ttl;
u16 pre_value;
@@ -1408,8 +1412,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_csa_settings *csa_settings,
- bool csa_action);
+ struct cfg80211_csa_settings *csa_settings);
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
/* scan/BSS handling */
@@ -1553,6 +1556,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 opmode,
+ enum ieee80211_band band, bool nss_only);
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
enum ieee80211_band band, bool nss_only);
@@ -1605,7 +1611,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
}
/* utility functions/constants */
-extern void *mac80211_wiphy_privid; /* for wiphy privid */
+extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type);
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d6d1f1df9119..088111af6c7c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -833,7 +833,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
cancel_work_sync(&local->dynamic_ps_enable_work);
cancel_work_sync(&sdata->recalc_smps);
+ sdata_lock(sdata);
sdata->vif.csa_active = false;
+ sdata_unlock(sdata);
cancel_work_sync(&sdata->csa_finalize_work);
cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
@@ -1057,7 +1059,8 @@ static void ieee80211_uninit(struct net_device *dev)
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv,
+ select_queue_fallback_t fallback)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
@@ -1075,7 +1078,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv,
+ select_queue_fallback_t fallback)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d767cfb9b45f..1f7d8422d62d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -893,10 +893,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* mac80211 supports control port protocol changing */
local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
+ } else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+ if (hw->max_signal <= 0) {
+ result = -EINVAL;
+ goto fail_wiphy_register;
+ }
+ }
WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
&& (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 5b919cab1de0..f70e9cd10552 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -688,7 +688,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
*pos++ = csa->settings.count;
*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
*pos++ = 6;
- if (ifmsh->chsw_init) {
+ if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) {
*pos++ = ifmsh->mshcfg.dot11MeshTTL;
*pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
} else {
@@ -859,18 +859,12 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
{
struct cfg80211_csa_settings params;
struct ieee80211_csa_ie csa_ie;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_chanctx *chanctx;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
- int err, num_chanctx;
+ int err;
u32 sta_flags;
- if (sdata->vif.csa_active)
- return true;
-
- if (!ifmsh->mesh_id)
- return false;
+ sdata_assert_lock(sdata);
sta_flags = IEEE80211_STA_DISABLE_VHT;
switch (sdata->vif.bss_conf.chandef.width) {
@@ -896,10 +890,6 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
params.chandef = csa_ie.chandef;
params.count = csa_ie.count;
- if (sdata->vif.bss_conf.chandef.chan->band !=
- params.chandef.chan->band)
- return false;
-
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
@@ -922,24 +912,12 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
return false;
}
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf)
- goto failed_chswitch;
-
- /* don't handle for multi-VIF cases */
- chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
- if (chanctx->refcount > 1)
- goto failed_chswitch;
-
- num_chanctx = 0;
- list_for_each_entry_rcu(chanctx, &sdata->local->chanctx_list, list)
- num_chanctx++;
-
- if (num_chanctx > 1)
- goto failed_chswitch;
-
- rcu_read_unlock();
+ if (cfg80211_chandef_identical(&params.chandef,
+ &sdata->vif.bss_conf.chandef)) {
+ mcsa_dbg(sdata,
+ "received csa with an identical chandef, ignoring\n");
+ return true;
+ }
mcsa_dbg(sdata,
"received channel switch announcement to go to channel %d MHz\n",
@@ -953,30 +931,16 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
ifmsh->pre_value = csa_ie.pre_value;
}
- if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) {
- if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0)
- return false;
- } else {
+ if (ifmsh->chsw_ttl >= ifmsh->mshcfg.dot11MeshTTL)
return false;
- }
- sdata->csa_radar_required = params.radar_required;
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_REPEATER;
- if (params.block_tx)
- ieee80211_stop_queues_by_reason(&sdata->local->hw,
- IEEE80211_MAX_QUEUE_MAP,
- IEEE80211_QUEUE_STOP_REASON_CSA);
-
- sdata->csa_chandef = params.chandef;
- sdata->vif.csa_active = true;
-
- ieee80211_bss_info_change_notify(sdata, err);
- drv_channel_switch_beacon(sdata, &params.chandef);
+ if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev,
+ &params) < 0)
+ return false;
return true;
-failed_chswitch:
- rcu_read_unlock();
- return false;
}
static void
@@ -1086,7 +1050,8 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
ifmsh->sync_ops->rx_bcn_presp(sdata,
stype, mgmt, &elems, rx_status);
- if (!ifmsh->chsw_init)
+ if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
+ !sdata->vif.csa_active)
ieee80211_mesh_process_chnswitch(sdata, &elems, true);
}
@@ -1095,29 +1060,30 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_csa_settings *tmp_csa_settings;
int ret = 0;
+ int changed = 0;
/* Reset the TTL value and Initiator flag */
- ifmsh->chsw_init = false;
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
ifmsh->chsw_ttl = 0;
/* Remove the CSA and MCSP elements from the beacon */
tmp_csa_settings = rcu_dereference(ifmsh->csa);
rcu_assign_pointer(ifmsh->csa, NULL);
- kfree_rcu(tmp_csa_settings, rcu_head);
+ if (tmp_csa_settings)
+ kfree_rcu(tmp_csa_settings, rcu_head);
ret = ieee80211_mesh_rebuild_beacon(sdata);
if (ret)
return -EINVAL;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ changed |= BSS_CHANGED_BEACON;
mcsa_dbg(sdata, "complete switching to center freq %d MHz",
sdata->vif.bss_conf.chandef.chan->center_freq);
- return 0;
+ return changed;
}
int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_csa_settings *csa_settings,
- bool csa_action)
+ struct cfg80211_csa_settings *csa_settings)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_csa_settings *tmp_csa_settings;
@@ -1141,12 +1107,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
return ret;
}
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
-
- if (csa_action)
- ieee80211_send_action_csa(sdata, csa_settings);
-
- return 0;
+ return BSS_CHANGED_BEACON;
}
static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
@@ -1210,7 +1171,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
ifmsh->pre_value = pre_value;
- if (!ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
+ if (!sdata->vif.csa_active &&
+ !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
mcsa_dbg(sdata, "Failed to process CSA action frame");
return;
}
@@ -1257,7 +1219,7 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
sdata_lock(sdata);
/* mesh already went down */
- if (!sdata->wdev.mesh_id_len)
+ if (!sdata->u.mesh.mesh_id_len)
goto out;
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -1310,7 +1272,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
sdata_lock(sdata);
/* mesh already went down */
- if (!sdata->wdev.mesh_id_len)
+ if (!sdata->u.mesh.mesh_id_len)
goto out;
if (ifmsh->preq_queue_len &&
@@ -1365,7 +1327,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
ifmsh->next_perr = jiffies;
- ifmsh->chsw_init = false;
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index fc1d82465b3c..61604834b914 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -508,6 +508,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
u8 *pos;
u32 cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u32 mask, ap_bf_sts, our_bf_sts;
BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
@@ -535,6 +536,16 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ ap_bf_sts = le32_to_cpu(ap_vht_cap->vht_cap_info) & mask;
+ our_bf_sts = cap & mask;
+
+ if (ap_bf_sts < our_bf_sts) {
+ cap &= ~mask;
+ cap |= ap_bf_sts;
+ }
+
/* reserve and fill IE */
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
@@ -745,6 +756,34 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
sband, chan, sdata->smps_mode);
+ /* if present, add any custom IEs that go before VHT */
+ if (assoc_data->ie_len) {
+ static const u8 before_vht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_PWR_CAPABILITY,
+ WLAN_EID_SUPPORTED_CHANNELS,
+ WLAN_EID_RSN,
+ WLAN_EID_QOS_CAPA,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES,
+ WLAN_EID_MOBILITY_DOMAIN,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ WLAN_EID_HT_CAPABILITY,
+ WLAN_EID_BSS_COEX_2040,
+ WLAN_EID_EXT_CAPABILITY,
+ WLAN_EID_QOS_TRAFFIC_CAPA,
+ WLAN_EID_TIM_BCAST_REQ,
+ WLAN_EID_INTERWORKING,
+ };
+ noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
+ before_vht, ARRAY_SIZE(before_vht),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->ap_vht_cap);
@@ -1001,7 +1040,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
}
ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
- sdata->vif.csa_active = true;
mutex_lock(&local->chanctx_mtx);
if (local->use_chanctx) {
@@ -1039,6 +1077,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->chanctx_mtx);
sdata->csa_chandef = csa_ie.chandef;
+ sdata->vif.csa_active = true;
if (csa_ie.mode)
ieee80211_stop_queues_by_reason(&local->hw,
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 22b223f13c9f..8fdadfd94ba8 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -10,15 +10,15 @@
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "rate.h"
#include "ieee80211_i.h"
#include "debugfs.h"
struct rate_control_alg {
struct list_head list;
- struct rate_control_ops *ops;
+ const struct rate_control_ops *ops;
};
static LIST_HEAD(rate_ctrl_algs);
@@ -29,7 +29,7 @@ module_param(ieee80211_default_rc_algo, charp, 0644);
MODULE_PARM_DESC(ieee80211_default_rc_algo,
"Default rate control algorithm for mac80211 to use");
-int ieee80211_rate_control_register(struct rate_control_ops *ops)
+int ieee80211_rate_control_register(const struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
@@ -60,7 +60,7 @@ int ieee80211_rate_control_register(struct rate_control_ops *ops)
}
EXPORT_SYMBOL(ieee80211_rate_control_register);
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops)
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
@@ -76,32 +76,31 @@ void ieee80211_rate_control_unregister(struct rate_control_ops *ops)
}
EXPORT_SYMBOL(ieee80211_rate_control_unregister);
-static struct rate_control_ops *
+static const struct rate_control_ops *
ieee80211_try_rate_control_ops_get(const char *name)
{
struct rate_control_alg *alg;
- struct rate_control_ops *ops = NULL;
+ const struct rate_control_ops *ops = NULL;
if (!name)
return NULL;
mutex_lock(&rate_ctrl_mutex);
list_for_each_entry(alg, &rate_ctrl_algs, list) {
- if (!strcmp(alg->ops->name, name))
- if (try_module_get(alg->ops->module)) {
- ops = alg->ops;
- break;
- }
+ if (!strcmp(alg->ops->name, name)) {
+ ops = alg->ops;
+ break;
+ }
}
mutex_unlock(&rate_ctrl_mutex);
return ops;
}
/* Get the rate control algorithm. */
-static struct rate_control_ops *
+static const struct rate_control_ops *
ieee80211_rate_control_ops_get(const char *name)
{
- struct rate_control_ops *ops;
+ const struct rate_control_ops *ops;
const char *alg_name;
kparam_block_sysfs_write(ieee80211_default_rc_algo);
@@ -111,10 +110,6 @@ ieee80211_rate_control_ops_get(const char *name)
alg_name = name;
ops = ieee80211_try_rate_control_ops_get(alg_name);
- if (!ops) {
- request_module("rc80211_%s", alg_name);
- ops = ieee80211_try_rate_control_ops_get(alg_name);
- }
if (!ops && name)
/* try default if specific alg requested but not found */
ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
@@ -127,11 +122,6 @@ ieee80211_rate_control_ops_get(const char *name)
return ops;
}
-static void ieee80211_rate_control_ops_put(struct rate_control_ops *ops)
-{
- module_put(ops->module);
-}
-
#ifdef CONFIG_MAC80211_DEBUGFS
static ssize_t rcname_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -158,11 +148,11 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
if (!ref)
- goto fail_ref;
+ return NULL;
ref->local = local;
ref->ops = ieee80211_rate_control_ops_get(name);
if (!ref->ops)
- goto fail_ops;
+ goto free;
#ifdef CONFIG_MAC80211_DEBUGFS
debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
@@ -172,14 +162,11 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
if (!ref->priv)
- goto fail_priv;
+ goto free;
return ref;
-fail_priv:
- ieee80211_rate_control_ops_put(ref->ops);
-fail_ops:
+free:
kfree(ref);
-fail_ref:
return NULL;
}
@@ -192,7 +179,6 @@ static void rate_control_free(struct rate_control_ref *ctrl_ref)
ctrl_ref->local->debugfs.rcdir = NULL;
#endif
- ieee80211_rate_control_ops_put(ctrl_ref->ops);
kfree(ctrl_ref);
}
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index b95e16c07081..9aa2a1190a86 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -21,7 +21,7 @@
struct rate_control_ref {
struct ieee80211_local *local;
- struct rate_control_ops *ops;
+ const struct rate_control_ops *ops;
void *priv;
};
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index f3d88b0c054c..26fd94fa0aed 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -657,7 +657,7 @@ minstrel_free(void *priv)
kfree(priv);
}
-struct rate_control_ops mac80211_minstrel = {
+const struct rate_control_ops mac80211_minstrel = {
.name = "minstrel",
.tx_status = minstrel_tx_status,
.get_rate = minstrel_get_rate,
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index f4301f4b2e41..046d1bd598a8 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -123,7 +123,7 @@ struct minstrel_debugfs_info {
char buf[];
};
-extern struct rate_control_ops mac80211_minstrel;
+extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index c1b5b73c5b91..bccaf854a309 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -124,7 +124,7 @@ const struct mcs_group minstrel_mcs_groups[] = {
#define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1)
-static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
+static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
@@ -1031,7 +1031,7 @@ minstrel_ht_free(void *priv)
mac80211_minstrel.free(priv);
}
-static struct rate_control_ops mac80211_minstrel_ht = {
+static const struct rate_control_ops mac80211_minstrel_ht = {
.name = "minstrel_ht",
.tx_status = minstrel_ht_tx_status,
.get_rate = minstrel_ht_get_rate,
@@ -1048,8 +1048,7 @@ static struct rate_control_ops mac80211_minstrel_ht = {
};
-static void
-init_sample_table(void)
+static void __init init_sample_table(void)
{
int col, i, new_idx;
u8 rnd[MCS_GROUP_RATES];
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 958fad07b54c..d0da2a70fe68 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -452,7 +452,7 @@ static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
kfree(priv_sta);
}
-static struct rate_control_ops mac80211_rcpid = {
+static const struct rate_control_ops mac80211_rcpid = {
.name = "pid",
.tx_status = rate_control_pid_tx_status,
.get_rate = rate_control_pid_get_rate,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c24ca0d0f469..593062109c50 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -40,8 +40,6 @@
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
struct sk_buff *skb)
{
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
if (likely(skb->len > FCS_LEN))
__pskb_trim(skb, skb->len - FCS_LEN);
@@ -53,9 +51,6 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
}
}
- if (status->vendor_radiotap_len)
- __pskb_pull(skb, status->vendor_radiotap_len);
-
return skb;
}
@@ -64,14 +59,13 @@ static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
- hdr = (void *)(skb->data + status->vendor_radiotap_len);
+ hdr = (void *)(skb->data);
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_FAILED_PLCP_CRC |
RX_FLAG_AMPDU_IS_ZEROLEN))
return 1;
- if (unlikely(skb->len < 16 + present_fcs_len +
- status->vendor_radiotap_len))
+ if (unlikely(skb->len < 16 + present_fcs_len))
return 1;
if (ieee80211_is_ctl(hdr->frame_control) &&
!ieee80211_is_pspoll(hdr->frame_control) &&
@@ -90,8 +84,6 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len = sizeof(struct ieee80211_radiotap_header) + 8;
/* allocate extra bitmaps */
- if (status->vendor_radiotap_len)
- len += 4;
if (status->chains)
len += 4 * hweight8(status->chains);
@@ -127,18 +119,6 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len += 2 * hweight8(status->chains);
}
- if (status->vendor_radiotap_len) {
- if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
- status->vendor_radiotap_align = 1;
- /* align standard part of vendor namespace */
- len = ALIGN(len, 2);
- /* allocate standard part of vendor namespace */
- len += 6;
- /* align vendor-defined part */
- len = ALIGN(len, status->vendor_radiotap_align);
- /* vendor-defined part is already in skb */
- }
-
return len;
}
@@ -172,7 +152,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
it_present = &rthdr->it_present;
/* radiotap header, set always present flags */
- rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
+ rthdr->it_len = cpu_to_le16(rtap_len);
it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
BIT(IEEE80211_RADIOTAP_CHANNEL) |
BIT(IEEE80211_RADIOTAP_RX_FLAGS);
@@ -190,14 +170,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
}
- if (status->vendor_radiotap_len) {
- it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
- BIT(IEEE80211_RADIOTAP_EXT);
- put_unaligned_le32(it_present_val, it_present);
- it_present++;
- it_present_val = status->vendor_radiotap_bitmap;
- }
-
put_unaligned_le32(it_present_val, it_present);
pos = (void *)(it_present + 1);
@@ -307,6 +279,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
if (status->flag & RX_FLAG_HT_GF)
*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+ if (status->flag & RX_FLAG_LDPC)
+ *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
pos++;
@@ -349,20 +323,23 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
/* known field - how to handle 80+80? */
- if (status->flag & RX_FLAG_80P80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
known &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
put_unaligned_le16(known, pos);
pos += 2;
/* flags */
if (status->flag & RX_FLAG_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+ /* in VHT, STBC is binary */
+ if (status->flag & RX_FLAG_STBC_MASK)
+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
pos++;
/* bandwidth */
- if (status->flag & RX_FLAG_80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80MHZ)
*pos++ = 4;
- else if (status->flag & RX_FLAG_80P80MHZ)
+ else if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
*pos++ = 0; /* marked not known above */
- else if (status->flag & RX_FLAG_160MHZ)
+ else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
*pos++ = 11;
else if (status->flag & RX_FLAG_40MHZ)
*pos++ = 1;
@@ -372,6 +349,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos = (status->rate_idx << 4) | status->vht_nss;
pos += 4;
/* coding field */
+ if (status->flag & RX_FLAG_LDPC)
+ *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
pos++;
/* group ID */
pos++;
@@ -383,21 +362,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos++ = status->chain_signal[chain];
*pos++ = chain;
}
-
- if (status->vendor_radiotap_len) {
- /* ensure 2 byte alignment for the vendor field as required */
- if ((pos - (u8 *)rthdr) & 1)
- *pos++ = 0;
- *pos++ = status->vendor_radiotap_oui[0];
- *pos++ = status->vendor_radiotap_oui[1];
- *pos++ = status->vendor_radiotap_oui[2];
- *pos++ = status->vendor_radiotap_subns;
- put_unaligned_le16(status->vendor_radiotap_len, pos);
- pos += 2;
- /* align the actual payload as requested */
- while ((pos - (u8 *)rthdr) & (status->vendor_radiotap_align - 1))
- *pos++ = 0;
- }
}
/*
@@ -428,8 +392,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
present_fcs_len = FCS_LEN;
- /* ensure hdr->frame_control and vendor radiotap data are in skb head */
- if (!pskb_may_pull(origskb, 2 + status->vendor_radiotap_len)) {
+ /* ensure hdr->frame_control is in skb head */
+ if (!pskb_may_pull(origskb, 2)) {
dev_kfree_skb(origskb);
return NULL;
}
@@ -599,10 +563,10 @@ static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
+ if (is_multicast_ether_addr(hdr->addr1))
return 0;
- return ieee80211_is_robust_mgmt_frame(hdr);
+ return ieee80211_is_robust_mgmt_frame(skb);
}
@@ -610,10 +574,10 @@ static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
+ if (!is_multicast_ether_addr(hdr->addr1))
return 0;
- return ieee80211_is_robust_mgmt_frame(hdr);
+ return ieee80211_is_robust_mgmt_frame(skb);
}
@@ -626,7 +590,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
return -1;
- if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
+ if (!ieee80211_is_robust_mgmt_frame(skb))
return -1; /* not a robust management frame */
mmie = (struct ieee80211_mmie *)
@@ -1261,6 +1225,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (ieee80211_is_data(hdr->frame_control)) {
sta->last_rx_rate_idx = status->rate_idx;
sta->last_rx_rate_flag = status->flag;
+ sta->last_rx_rate_vht_flag = status->vht_flag;
sta->last_rx_rate_vht_nss = status->vht_nss;
}
}
@@ -1311,18 +1276,15 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
!ieee80211_has_morefrags(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
+ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
+	    /* PM bit is only checked in frames where it isn't reserved;
+ * in AP mode it's reserved in non-bufferable management frames
+ * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+ */
+ (!ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
- /*
- * Ignore doze->wake transitions that are
- * indicated by non-data frames, the standard
- * is unclear here, but for example going to
- * PS mode and then scanning would cause a
- * doze->wake transition for the probe request,
- * and that is clearly undesirable.
- */
- if (ieee80211_is_data(hdr->frame_control) &&
- !ieee80211_has_pm(hdr->frame_control))
+ if (!ieee80211_has_pm(hdr->frame_control))
sta_ps_end(sta);
} else {
if (ieee80211_has_pm(hdr->frame_control))
@@ -1845,8 +1807,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
* having configured keys.
*/
if (unlikely(ieee80211_is_action(fc) && !rx->key &&
- ieee80211_is_robust_mgmt_frame(
- (struct ieee80211_hdr *) rx->skb->data)))
+ ieee80211_is_robust_mgmt_frame(rx->skb)))
return -EACCES;
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d77ff7090630..d4d85de0d75d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -261,6 +261,7 @@ struct ieee80211_tx_latency_stat {
* "the" transmit rate
* @last_rx_rate_idx: rx status rate index of the last data packet
* @last_rx_rate_flag: rx status flag of the last data packet
+ * @last_rx_rate_vht_flag: rx status vht flag of the last data packet
* @last_rx_rate_vht_nss: rx status nss of last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
@@ -397,6 +398,7 @@ struct sta_info {
struct ieee80211_tx_rate last_tx_rate;
int last_rx_rate_idx;
u32 last_rx_rate_flag;
+ u32 last_rx_rate_vht_flag;
u8 last_rx_rate_vht_nss;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1ee85c402439..e6e574a307c8 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -479,7 +479,7 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
u32 msrmnt;
u16 tid;
u8 *qc;
- int i, bin_range_count, bin_count;
+ int i, bin_range_count;
u32 *bin_ranges;
__le16 fc;
struct ieee80211_tx_latency_stat *tx_lat;
@@ -522,7 +522,6 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
/* count how many Tx frames transmitted with the appropriate latency */
bin_range_count = tx_latency->n_ranges;
bin_ranges = tx_latency->ranges;
- bin_count = tx_lat->bin_count;
for (i = 0; i < bin_range_count; i++) {
if (msrmnt <= bin_ranges[i]) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 97a02d3f7d87..722151fa5dce 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -452,8 +452,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
return 0;
- if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
- skb->data))
+ if (!ieee80211_is_robust_mgmt_frame(skb))
return 0;
return 1;
@@ -523,11 +522,8 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
return TX_CONTINUE;
- /* only deauth, disassoc and action are bufferable MMPDUs */
if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_deauth(hdr->frame_control) &&
- !ieee80211_is_disassoc(hdr->frame_control) &&
- !ieee80211_is_action(hdr->frame_control)) {
+ !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
if (tx->flags & IEEE80211_TX_UNICAST)
info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
return TX_CONTINUE;
@@ -567,7 +563,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key = key;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1) &&
- ieee80211_is_robust_mgmt_frame(hdr) &&
+ ieee80211_is_robust_mgmt_frame(tx->skb) &&
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
tx->key = key;
else if (is_multicast_ether_addr(hdr->addr1) &&
@@ -582,12 +578,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key = NULL;
else if (tx->skb->protocol == tx->sdata->control_port_protocol)
tx->key = NULL;
- else if (ieee80211_is_robust_mgmt_frame(hdr) &&
+ else if (ieee80211_is_robust_mgmt_frame(tx->skb) &&
!(ieee80211_is_action(hdr->frame_control) &&
tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
tx->key = NULL;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_robust_mgmt_frame(hdr))
+ !ieee80211_is_robust_mgmt_frame(tx->skb))
tx->key = NULL;
else {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
@@ -2402,15 +2398,6 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_csa_finish(struct ieee80211_vif *vif)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-
- ieee80211_queue_work(&sdata->local->hw,
- &sdata->csa_finalize_work);
-}
-EXPORT_SYMBOL(ieee80211_csa_finish);
-
static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
struct beacon_data *beacon)
{
@@ -2439,8 +2426,12 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(counter_offset_beacon >= beacon_data_len))
return;
- /* warn if the driver did not check for/react to csa completeness */
- if (WARN_ON(beacon_data[counter_offset_beacon] == 0))
+	/* Warn if the driver did not check for/react to CSA
+ * completeness. A beacon with CSA counter set to 0 should
+ * never occur, because a counter of 1 means switch just
+ * before the next beacon.
+ */
+ if (WARN_ON(beacon_data[counter_offset_beacon] == 1))
return;
beacon_data[counter_offset_beacon]--;
@@ -2506,7 +2497,7 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
if (WARN_ON(counter_beacon > beacon_data_len))
goto out;
- if (beacon_data[counter_beacon] == 0)
+ if (beacon_data[counter_beacon] == 1)
ret = true;
out:
rcu_read_unlock();
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 676dc0967f37..d842af5c8a95 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -34,7 +34,7 @@
#include "wep.h"
/* privid for wiphys to determine whether they belong to us or not */
-void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
+const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid;
struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
{
@@ -1281,13 +1281,32 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
* that calculates local->scan_ies_len.
*/
- /* add any remaining custom IEs */
+ /* insert custom IEs that go before VHT */
if (ie && ie_len) {
- noffset = ie_len;
+ static const u8 before_vht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_DS_PARAMS,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ WLAN_EID_HT_CAPABILITY,
+ WLAN_EID_BSS_COEX_2040,
+ WLAN_EID_EXT_CAPABILITY,
+ WLAN_EID_SSID_LIST,
+ WLAN_EID_CHANNEL_USAGE,
+ WLAN_EID_INTERWORKING,
+ /* mesh ID can't happen here */
+ /* 60 GHz can't happen here right now */
+ };
+ noffset = ieee80211_ie_split(ie, ie_len,
+ before_vht, ARRAY_SIZE(before_vht),
+ offset);
if (end - pos < noffset - offset)
goto out_err;
memcpy(pos, ie + offset, noffset - offset);
pos += noffset - offset;
+ offset = noffset;
}
if (sband->vht_cap.vht_supported) {
@@ -1297,6 +1316,15 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
sband->vht_cap.cap);
}
+ /* add any remaining custom IEs */
+ if (ie && ie_len) {
+ noffset = ie_len;
+ if (end - pos < noffset - offset)
+ goto out_err;
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
+ }
+
return pos - buffer;
out_err:
WARN_ONCE(1, "not enough space for preq IEs\n");
@@ -1374,7 +1402,6 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band, u32 *basic_rates)
{
struct ieee80211_supported_band *sband;
- struct ieee80211_rate *bitrates;
size_t num_rates;
u32 supp_rates, rate_flags;
int i, j, shift;
@@ -1386,7 +1413,6 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sband))
return 1;
- bitrates = sband->bitrates;
num_rates = sband->n_bitrates;
supp_rates = 0;
for (i = 0; i < elems->supp_rates_len +
@@ -2272,11 +2298,11 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.nss = status->vht_nss;
if (status->flag & RX_FLAG_40MHZ)
ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
- if (status->flag & RX_FLAG_80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80MHZ)
ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
- if (status->flag & RX_FLAG_80P80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
- if (status->flag & RX_FLAG_160MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_160MHZ)
ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
if (status->flag & RX_FLAG_SHORT_GI)
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index d75f35c6e1a0..e9e36a256165 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -349,9 +349,9 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss);
}
-void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, u8 opmode,
- enum ieee80211_band band, bool nss_only)
+u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 opmode,
+ enum ieee80211_band band, bool nss_only)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -363,7 +363,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
- return;
+ return 0;
nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
@@ -375,7 +375,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
}
if (nss_only)
- goto change;
+ return changed;
switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
@@ -398,7 +398,19 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
changed |= IEEE80211_RC_BW_CHANGED;
}
- change:
- if (changed)
+ return changed;
+}
+
+void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 opmode,
+ enum ieee80211_band band, bool nss_only)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+
+ u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode,
+ band, nss_only);
+
+ if (changed > 0)
rate_control_rate_update(local, sband, sta, changed);
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 21448d629b15..b8600e3c29c8 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -301,8 +301,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
}
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
- int encrypted)
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
{
__le16 mask_fc;
int a4_included, mgmt;
@@ -456,7 +455,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
pos += IEEE80211_CCMP_HDR_LEN;
- ccmp_special_blocks(skb, pn, b_0, aad, 0);
+ ccmp_special_blocks(skb, pn, b_0, aad);
ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, IEEE80211_CCMP_MIC_LEN));
@@ -495,7 +494,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (!ieee80211_is_data(hdr->frame_control) &&
- !ieee80211_is_robust_mgmt_frame(hdr))
+ !ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN -
@@ -524,7 +523,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
u8 aad[2 * AES_BLOCK_SIZE];
u8 b_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad, 1);
+ ccmp_special_blocks(skb, pn, b_0, aad);
if (ieee80211_aes_ccm_decrypt(
key->u.ccmp.tfm, b_0, aad,
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index 52ae6646a411..b75bb01e5c6b 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -165,6 +165,67 @@ err:
return ERR_PTR(err);
}
+static int mac802154_set_txpower(struct wpan_phy *phy, int db)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_txpower)
+ return -ENOTSUPP;
+
+ return priv->ops->set_txpower(&priv->hw, db);
+}
+
+static int mac802154_set_lbt(struct wpan_phy *phy, bool on)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_lbt)
+ return -ENOTSUPP;
+
+ return priv->ops->set_lbt(&priv->hw, on);
+}
+
+static int mac802154_set_cca_mode(struct wpan_phy *phy, u8 mode)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_cca_mode)
+ return -ENOTSUPP;
+
+ return priv->ops->set_cca_mode(&priv->hw, mode);
+}
+
+static int mac802154_set_cca_ed_level(struct wpan_phy *phy, s32 level)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_cca_ed_level)
+ return -ENOTSUPP;
+
+ return priv->ops->set_cca_ed_level(&priv->hw, level);
+}
+
+static int mac802154_set_csma_params(struct wpan_phy *phy, u8 min_be,
+ u8 max_be, u8 retries)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_csma_params)
+ return -ENOTSUPP;
+
+ return priv->ops->set_csma_params(&priv->hw, min_be, max_be, retries);
+}
+
+static int mac802154_set_frame_retries(struct wpan_phy *phy, s8 retries)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_frame_retries)
+ return -ENOTSUPP;
+
+ return priv->ops->set_frame_retries(&priv->hw, retries);
+}
+
struct ieee802154_dev *
ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
{
@@ -242,6 +303,12 @@ int ieee802154_register_device(struct ieee802154_dev *dev)
priv->phy->add_iface = mac802154_add_iface;
priv->phy->del_iface = mac802154_del_iface;
+ priv->phy->set_txpower = mac802154_set_txpower;
+ priv->phy->set_lbt = mac802154_set_lbt;
+ priv->phy->set_cca_mode = mac802154_set_cca_mode;
+ priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
+ priv->phy->set_csma_params = mac802154_set_csma_params;
+ priv->phy->set_frame_retries = mac802154_set_frame_retries;
rc = wpan_phy_register(priv->phy);
if (rc < 0)
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index 8ded97cf1c33..f48f40c1da1a 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -62,8 +62,6 @@ static void hw_addr_notify(struct work_struct *work)
pr_debug("failed changed mask %lx\n", nw->changed);
kfree(nw);
-
- return;
}
static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
@@ -79,8 +77,6 @@ static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
work->dev = dev;
work->changed = changed;
queue_work(priv->hw->dev_workqueue, &work->work);
-
- return;
}
void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 38548ec2098f..03855b0677cc 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -80,7 +80,6 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
mac802154_wpans_rx(priv, skb);
out:
dev_kfree_skb(skb);
- return;
}
static void mac802154_rx_worker(struct work_struct *work)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fdf51353cf78..e42214b932c3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1460,7 +1460,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (nlk->netlink_bind && nlk->groups[0]) {
int i;
- for (i=0; i<nlk->ngroups; i++) {
+ for (i = 0; i < nlk->ngroups; i++) {
if (test_bit(i, nlk->groups))
nlk->netlink_bind(i);
}
@@ -2549,7 +2549,7 @@ __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int fla
struct nlmsghdr *nlh;
int size = nlmsg_msg_size(len);
- nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
+ nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e9a48baf8551..36f8872cb072 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -256,10 +256,10 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
out:
/* Update datapath statistics. */
- u64_stats_update_begin(&stats->sync);
+ u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
- u64_stats_update_end(&stats->sync);
+ u64_stats_update_end(&stats->syncp);
}
static struct genl_family dp_packet_genl_family = {
@@ -295,9 +295,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
err:
stats = this_cpu_ptr(dp->stats_percpu);
- u64_stats_update_begin(&stats->sync);
+ u64_stats_update_begin(&stats->syncp);
stats->n_lost++;
- u64_stats_update_end(&stats->sync);
+ u64_stats_update_end(&stats->syncp);
return err;
}
@@ -606,9 +606,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
do {
- start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+ start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
local_stats = *percpu_stats;
- } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+ } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
stats->n_hit += local_stats.n_hit;
stats->n_missed += local_stats.n_missed;
@@ -1215,18 +1215,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_free_dp;
- dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+ dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
if (!dp->stats_percpu) {
err = -ENOMEM;
goto err_destroy_table;
}
- for_each_possible_cpu(i) {
- struct dp_stats_percpu *dpath_stats;
- dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
- u64_stats_init(&dpath_stats->sync);
- }
-
dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
GFP_KERNEL);
if (!dp->ports) {
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 6be9fbb5e9cb..05317380fc03 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -55,7 +55,7 @@ struct dp_stats_percpu {
u64 n_missed;
u64 n_lost;
u64 n_mask_hit;
- struct u64_stats_sync sync;
+ struct u64_stats_sync syncp;
};
/**
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 208dd9a26dd1..3b4db3220456 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -121,7 +121,6 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
{
struct vport *vport;
size_t alloc_size;
- int i;
alloc_size = sizeof(struct vport);
if (priv_size) {
@@ -139,19 +138,12 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
vport->ops = ops;
INIT_HLIST_NODE(&vport->dp_hash_node);
- vport->percpu_stats = alloc_percpu(struct pcpu_sw_netstats);
+ vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!vport->percpu_stats) {
kfree(vport);
return ERR_PTR(-ENOMEM);
}
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *vport_stats;
- vport_stats = per_cpu_ptr(vport->percpu_stats, i);
- u64_stats_init(&vport_stats->syncp);
- }
-
-
spin_lock_init(&vport->stats_lock);
return vport;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a2bb37506c5..48a6a93db296 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
return po->xmit == packet_direct_xmit;
}
-static u16 packet_pick_tx_queue(struct net_device *dev)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}
+static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ u16 queue_index;
+
+ if (ops->ndo_select_queue) {
+ queue_index = ops->ndo_select_queue(dev, skb, NULL,
+ __packet_pick_tx_queue);
+ queue_index = netdev_cap_txqueue(dev, queue_index);
+ } else {
+ queue_index = __packet_pick_tx_queue(dev, skb);
+ }
+
+ skb_set_queue_mapping(skb, queue_index);
+}
+
/* register_prot_hook must be invoked with the po->bind_lock held,
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
}
- skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+ packet_pick_tx_queue(dev, skb);
+
skb->destructor = tpacket_destruct_skb;
__packet_set_status(po, ph, TP_STATUS_SENDING);
packet_inc_pending(&po->tx_ring);
@@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+
+ packet_pick_tx_queue(dev, skb);
if (po->has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -3786,7 +3804,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
*/
if (!tx_ring)
init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
- break;
+ break;
default:
break;
}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ed7e0b4e7f90..b3b16c070a7f 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -789,7 +789,8 @@ void rfkill_resume_polling(struct rfkill *rfkill)
if (!rfkill->ops->poll)
return;
- schedule_work(&rfkill->poll_work.work);
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);
@@ -894,7 +895,8 @@ static void rfkill_poll(struct work_struct *work)
*/
rfkill->ops->poll(rfkill, rfkill->data);
- schedule_delayed_work(&rfkill->poll_work,
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
}
@@ -958,7 +960,8 @@ int __must_check rfkill_register(struct rfkill *rfkill)
INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
if (rfkill->ops->poll)
- schedule_delayed_work(&rfkill->poll_work,
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
if (!rfkill->persistent || rfkill_epo_lock_active) {
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 72bdc7166345..8a5ba5add4bc 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,8 +27,11 @@
#include <net/act_api.h>
#include <net/netlink.h>
-void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+void tcf_hash_destroy(struct tc_action *a)
{
+ struct tcf_common *p = a->priv;
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
+
spin_lock_bh(&hinfo->lock);
hlist_del(&p->tcfc_head);
spin_unlock_bh(&hinfo->lock);
@@ -42,18 +45,22 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
}
EXPORT_SYMBOL(tcf_hash_destroy);
-int tcf_hash_release(struct tcf_common *p, int bind,
- struct tcf_hashinfo *hinfo)
+int tcf_hash_release(struct tc_action *a, int bind)
{
+ struct tcf_common *p = a->priv;
int ret = 0;
if (p) {
if (bind)
p->tcfc_bindcnt--;
+ else if (p->tcfc_bindcnt > 0)
+ return -EPERM;
p->tcfc_refcnt--;
if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
- tcf_hash_destroy(p, hinfo);
+ if (a->ops->cleanup)
+ a->ops->cleanup(a, bind);
+ tcf_hash_destroy(a);
ret = 1;
}
}
@@ -118,6 +125,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
struct tcf_common *p;
struct nlattr *nest;
int i = 0, n_i = 0;
+ int ret = -EINVAL;
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
@@ -127,10 +135,13 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
for (i = 0; i < (hinfo->hmask + 1); i++) {
head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
hlist_for_each_entry_safe(p, n, head, tcfc_head) {
- if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) {
+ a->priv = p;
+ ret = tcf_hash_release(a, 0);
+ if (ret == ACT_P_DELETED) {
module_put(a->ops->owner);
n_i++;
- }
+ } else if (ret < 0)
+ goto nla_put_failure;
}
}
if (nla_put_u32(skb, TCA_FCNT, n_i))
@@ -140,7 +151,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
return n_i;
nla_put_failure:
nla_nest_cancel(skb, nest);
- return -EINVAL;
+ return ret;
}
static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
@@ -198,7 +209,7 @@ int tcf_hash_search(struct tc_action *a, u32 index)
}
EXPORT_SYMBOL(tcf_hash_search);
-struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind)
+int tcf_hash_check(u32 index, struct tc_action *a, int bind)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = NULL;
@@ -207,19 +218,30 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind)
p->tcfc_bindcnt++;
p->tcfc_refcnt++;
a->priv = p;
+ return 1;
}
- return p;
+ return 0;
}
EXPORT_SYMBOL(tcf_hash_check);
-struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size, int bind)
+void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
+{
+ struct tcf_common *pc = a->priv;
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
+}
+EXPORT_SYMBOL(tcf_hash_cleanup);
+
+int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+ int size, int bind)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = kzalloc(size, GFP_KERNEL);
if (unlikely(!p))
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
p->tcfc_refcnt = 1;
if (bind)
p->tcfc_bindcnt = 1;
@@ -234,17 +256,19 @@ struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
&p->tcfc_lock, est);
if (err) {
kfree(p);
- return ERR_PTR(err);
+ return err;
}
}
a->priv = (void *) p;
- return p;
+ return 0;
}
EXPORT_SYMBOL(tcf_hash_create);
-void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+void tcf_hash_insert(struct tc_action *a)
{
+ struct tcf_common *p = a->priv;
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
spin_lock_bh(&hinfo->lock);
@@ -256,12 +280,13 @@ EXPORT_SYMBOL(tcf_hash_insert);
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
-int tcf_register_action(struct tc_action_ops *act)
+int tcf_register_action(struct tc_action_ops *act, unsigned int mask)
{
struct tc_action_ops *a;
+ int err;
- /* Must supply act, dump, cleanup and init */
- if (!act->act || !act->dump || !act->cleanup || !act->init)
+ /* Must supply act, dump and init */
+ if (!act->act || !act->dump || !act->init)
return -EINVAL;
/* Supply defaults */
@@ -270,10 +295,21 @@ int tcf_register_action(struct tc_action_ops *act)
if (!act->walk)
act->walk = tcf_generic_walker;
+ act->hinfo = kmalloc(sizeof(struct tcf_hashinfo), GFP_KERNEL);
+ if (!act->hinfo)
+ return -ENOMEM;
+ err = tcf_hashinfo_init(act->hinfo, mask);
+ if (err) {
+ kfree(act->hinfo);
+ return err;
+ }
+
write_lock(&act_mod_lock);
list_for_each_entry(a, &act_base, head) {
if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
write_unlock(&act_mod_lock);
+ tcf_hashinfo_destroy(act->hinfo);
+ kfree(act->hinfo);
return -EEXIST;
}
}
@@ -292,6 +328,8 @@ int tcf_unregister_action(struct tc_action_ops *act)
list_for_each_entry(a, &act_base, head) {
if (a == act) {
list_del(&act->head);
+ tcf_hashinfo_destroy(act->hinfo);
+ kfree(act->hinfo);
err = 0;
break;
}
@@ -368,16 +406,21 @@ exec_done:
}
EXPORT_SYMBOL(tcf_action_exec);
-void tcf_action_destroy(struct list_head *actions, int bind)
+int tcf_action_destroy(struct list_head *actions, int bind)
{
struct tc_action *a, *tmp;
+ int ret = 0;
list_for_each_entry_safe(a, tmp, actions, list) {
- if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
+ ret = tcf_hash_release(a, bind);
+ if (ret == ACT_P_DELETED)
module_put(a->ops->owner);
+ else if (ret < 0)
+ return ret;
list_del(&a->list);
kfree(a);
}
+ return ret;
}
int
@@ -642,6 +685,20 @@ act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
return rtnl_unicast(skb, net, portid);
}
+static struct tc_action *create_a(int i)
+{
+ struct tc_action *act;
+
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
+ if (act == NULL) {
+ pr_debug("create_a: failed to alloc!\n");
+ return NULL;
+ }
+ act->order = i;
+ INIT_LIST_HEAD(&act->list);
+ return act;
+}
+
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
{
@@ -661,11 +718,10 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
index = nla_get_u32(tb[TCA_ACT_INDEX]);
err = -ENOMEM;
- a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
+ a = create_a(0);
if (a == NULL)
goto err_out;
- INIT_LIST_HEAD(&a->list);
err = -EINVAL;
a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
if (a->ops == NULL) /* could happen in batch of actions */
@@ -695,20 +751,6 @@ static void cleanup_a(struct list_head *actions)
}
}
-static struct tc_action *create_a(int i)
-{
- struct tc_action *act;
-
- act = kzalloc(sizeof(*act), GFP_KERNEL);
- if (act == NULL) {
- pr_debug("create_a: failed to alloc!\n");
- return NULL;
- }
- act->order = i;
- INIT_LIST_HEAD(&act->list);
- return act;
-}
-
static int tca_action_flush(struct net *net, struct nlattr *nla,
struct nlmsghdr *n, u32 portid)
{
@@ -720,18 +762,12 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
struct nlattr *nest;
struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
- struct tc_action *a = create_a(0);
+ struct tc_action a;
int err = -ENOMEM;
- if (a == NULL) {
- pr_debug("tca_action_flush: couldnt create tc_action\n");
- return err;
- }
-
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
pr_debug("tca_action_flush: failed skb alloc\n");
- kfree(a);
return err;
}
@@ -743,8 +779,10 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
err = -EINVAL;
kind = tb[TCA_ACT_KIND];
- a->ops = tc_lookup_action(kind);
- if (a->ops == NULL) /*some idjot trying to flush unknown action */
+ memset(&a, 0, sizeof(struct tc_action));
+ INIT_LIST_HEAD(&a.list);
+ a.ops = tc_lookup_action(kind);
+ if (a.ops == NULL) /*some idjot trying to flush unknown action */
goto err_out;
nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
@@ -759,7 +797,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
if (nest == NULL)
goto out_module_put;
- err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
+ err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a);
if (err < 0)
goto out_module_put;
if (err == 0)
@@ -769,8 +807,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
nlh->nlmsg_flags |= NLM_F_ROOT;
- module_put(a->ops->owner);
- kfree(a);
+ module_put(a.ops->owner);
err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
if (err > 0)
@@ -779,11 +816,10 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
return err;
out_module_put:
- module_put(a->ops->owner);
+ module_put(a.ops->owner);
err_out:
noflush_out:
kfree_skb(skb);
- kfree(a);
return err;
}
@@ -805,7 +841,11 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
}
/* now do the delete */
- tcf_action_destroy(actions, 0);
+ ret = tcf_action_destroy(actions, 0);
+ if (ret < 0) {
+ kfree_skb(skb);
+ return ret;
+ }
ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
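The act_api.c changes above shift ownership of the per-action hash table into the core: tcf_register_action() now takes the table mask, allocates and initializes the tcf_hashinfo itself, and frees it again in tcf_unregister_action(), while .cleanup becomes optional and is only invoked from tcf_hash_release() when an action has private state to free. As a rough sketch of the registration boilerplate an action module is left with after this series (the "foo" names and TCA_ACT_FOO id are invented for illustration, and the three callbacks are assumed to be defined elsewhere in the module):

	#define FOO_TAB_MASK 15

	static struct tc_action_ops act_foo_ops = {
		.kind	= "foo",
		.type	= TCA_ACT_FOO,	/* hypothetical action id */
		.owner	= THIS_MODULE,
		.act	= tcf_foo,	/* assumed: packet action handler */
		.dump	= tcf_foo_dump,	/* assumed: netlink dump */
		.init	= tcf_foo_init,	/* assumed: netlink init */
		/* no .hinfo; .cleanup only if private state must be freed */
	};

	static int __init foo_init_module(void)
	{
		/* the core allocates and sizes the hash table from the mask */
		return tcf_register_action(&act_foo_ops, FOO_TAB_MASK);
	}

	static void __exit foo_cleanup_module(void)
	{
		/* unregistering also destroys the per-ops hash table */
		tcf_unregister_action(&act_foo_ops);
	}

	module_init(foo_init_module);
	module_exit(foo_cleanup_module);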
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 2210187c45c2..edbf40dac709 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -37,7 +37,6 @@
#include <net/tc_act/tc_csum.h>
#define CSUM_TAB_MASK 15
-static struct tcf_hashinfo csum_hash_info;
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
@@ -48,7 +47,6 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
{
struct nlattr *tb[TCA_CSUM_MAX + 1];
struct tc_csum *parm;
- struct tcf_common *pc;
struct tcf_csum *p;
int ret = 0, err;
@@ -63,38 +61,31 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
return -EINVAL;
parm = nla_data(tb[TCA_CSUM_PARMS]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind)/* dont override defaults */
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- p = to_tcf_csum(pc);
+ p = to_tcf_csum(a);
spin_lock_bh(&p->tcf_lock);
p->tcf_action = parm->action;
p->update_flags = parm->update_flags;
spin_unlock_bh(&p->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_csum_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_csum *p = a->priv;
- return tcf_hash_release(&p->common, bind, &csum_hash_info);
-}
-
/**
* tcf_csum_skb_nextlayer - Get next layer pointer
* @skb: sk_buff to use
@@ -569,12 +560,10 @@ nla_put_failure:
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
- .hinfo = &csum_hash_info,
.type = TCA_ACT_CSUM,
.owner = THIS_MODULE,
.act = tcf_csum,
.dump = tcf_csum_dump,
- .cleanup = tcf_csum_cleanup,
.init = tcf_csum_init,
};
@@ -583,11 +572,7 @@ MODULE_LICENSE("GPL");
static int __init csum_init_module(void)
{
- int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
- if (err)
- return err;
-
- return tcf_register_action(&act_csum_ops);
+ return tcf_register_action(&act_csum_ops, CSUM_TAB_MASK);
}
static void __exit csum_cleanup_module(void)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index a0eed30d5811..d6bcbd9f7791 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -24,7 +24,6 @@
#include <net/tc_act/tc_gact.h>
#define GACT_TAB_MASK 15
-static struct tcf_hashinfo gact_hash_info;
#ifdef CONFIG_GACT_PROB
static int gact_net_rand(struct tcf_gact *gact)
@@ -57,7 +56,6 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_GACT_MAX + 1];
struct tc_gact *parm;
struct tcf_gact *gact;
- struct tcf_common *pc;
int ret = 0;
int err;
#ifdef CONFIG_GACT_PROB
@@ -86,21 +84,20 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
}
#endif
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind)/* dont override defaults */
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- gact = to_gact(pc);
+ gact = to_gact(a);
spin_lock_bh(&gact->tcf_lock);
gact->tcf_action = parm->action;
@@ -113,19 +110,10 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
#endif
spin_unlock_bh(&gact->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_gact_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_gact *gact = a->priv;
-
- if (gact)
- return tcf_hash_release(&gact->common, bind, a->ops->hinfo);
- return 0;
-}
-
static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -191,12 +179,10 @@ nla_put_failure:
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
- .hinfo = &gact_hash_info,
.type = TCA_ACT_GACT,
.owner = THIS_MODULE,
.act = tcf_gact,
.dump = tcf_gact_dump,
- .cleanup = tcf_gact_cleanup,
.init = tcf_gact_init,
};
@@ -206,21 +192,17 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
- int err = tcf_hashinfo_init(&gact_hash_info, GACT_TAB_MASK);
- if (err)
- return err;
#ifdef CONFIG_GACT_PROB
pr_info("GACT probability on\n");
#else
pr_info("GACT probability NOT on\n");
#endif
- return tcf_register_action(&act_gact_ops);
+ return tcf_register_action(&act_gact_ops, GACT_TAB_MASK);
}
static void __exit gact_cleanup_module(void)
{
tcf_unregister_action(&act_gact_ops);
- tcf_hashinfo_destroy(&gact_hash_info);
}
module_init(gact_init_module);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 0a6d62174027..8a64a0734aee 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -29,7 +29,6 @@
#define IPT_TAB_MASK 15
-static struct tcf_hashinfo ipt_hash_info;
static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
{
@@ -69,22 +68,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
module_put(par.target->me);
}
-static int tcf_ipt_release(struct tcf_ipt *ipt, int bind)
+static void tcf_ipt_release(struct tc_action *a, int bind)
{
- int ret = 0;
- if (ipt) {
- if (bind)
- ipt->tcf_bindcnt--;
- ipt->tcf_refcnt--;
- if (ipt->tcf_bindcnt <= 0 && ipt->tcf_refcnt <= 0) {
- ipt_destroy_target(ipt->tcfi_t);
- kfree(ipt->tcfi_tname);
- kfree(ipt->tcfi_t);
- tcf_hash_destroy(&ipt->common, &ipt_hash_info);
- ret = ACT_P_DELETED;
- }
- }
- return ret;
+ struct tcf_ipt *ipt = to_ipt(a);
+ ipt_destroy_target(ipt->tcfi_t);
+ kfree(ipt->tcfi_tname);
+ kfree(ipt->tcfi_t);
}
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
@@ -99,7 +88,6 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
{
struct nlattr *tb[TCA_IPT_MAX + 1];
struct tcf_ipt *ipt;
- struct tcf_common *pc;
struct xt_entry_target *td, *t;
char *tname;
int ret = 0, err;
@@ -125,21 +113,20 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
if (tb[TCA_IPT_INDEX] != NULL)
index = nla_get_u32(tb[TCA_IPT_INDEX]);
- pc = tcf_hash_check(index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(index, a, bind)) {
+ ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind)/* dont override defaults */
return 0;
- tcf_ipt_release(to_ipt(pc), bind);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- ipt = to_ipt(pc);
+ ipt = to_ipt(a);
hook = nla_get_u32(tb[TCA_IPT_HOOK]);
@@ -170,7 +157,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
ipt->tcfi_hook = hook;
spin_unlock_bh(&ipt->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
err3:
@@ -178,21 +165,11 @@ err3:
err2:
kfree(tname);
err1:
- if (ret == ACT_P_CREATED) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
- }
+ if (ret == ACT_P_CREATED)
+ tcf_hash_cleanup(a, est);
return err;
}
-static int tcf_ipt_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_ipt *ipt = a->priv;
- return tcf_ipt_release(ipt, bind);
-}
-
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -284,23 +261,21 @@ nla_put_failure:
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
- .hinfo = &ipt_hash_info,
.type = TCA_ACT_IPT,
.owner = THIS_MODULE,
.act = tcf_ipt,
.dump = tcf_ipt_dump,
- .cleanup = tcf_ipt_cleanup,
+ .cleanup = tcf_ipt_release,
.init = tcf_ipt_init,
};
static struct tc_action_ops act_xt_ops = {
.kind = "xt",
- .hinfo = &ipt_hash_info,
.type = TCA_ACT_XT,
.owner = THIS_MODULE,
.act = tcf_ipt,
.dump = tcf_ipt_dump,
- .cleanup = tcf_ipt_cleanup,
+ .cleanup = tcf_ipt_release,
.init = tcf_ipt_init,
};
@@ -311,20 +286,16 @@ MODULE_ALIAS("act_xt");
static int __init ipt_init_module(void)
{
- int ret1, ret2, err;
- err = tcf_hashinfo_init(&ipt_hash_info, IPT_TAB_MASK);
- if (err)
- return err;
+ int ret1, ret2;
- ret1 = tcf_register_action(&act_xt_ops);
+ ret1 = tcf_register_action(&act_xt_ops, IPT_TAB_MASK);
if (ret1 < 0)
printk("Failed to load xt action\n");
- ret2 = tcf_register_action(&act_ipt_ops);
+ ret2 = tcf_register_action(&act_ipt_ops, IPT_TAB_MASK);
if (ret2 < 0)
printk("Failed to load ipt action\n");
if (ret1 < 0 && ret2 < 0) {
- tcf_hashinfo_destroy(&ipt_hash_info);
return ret1;
} else
return 0;
@@ -334,7 +305,6 @@ static void __exit ipt_cleanup_module(void)
{
tcf_unregister_action(&act_xt_ops);
tcf_unregister_action(&act_ipt_ops);
- tcf_hashinfo_destroy(&ipt_hash_info);
}
module_init(ipt_init_module);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0b2c6d39d396..4f912c0e225b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -31,23 +31,13 @@
#define MIRRED_TAB_MASK 7
static LIST_HEAD(mirred_list);
-static struct tcf_hashinfo mirred_hash_info;
-static int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static void tcf_mirred_release(struct tc_action *a, int bind)
{
- if (m) {
- if (bind)
- m->tcf_bindcnt--;
- m->tcf_refcnt--;
- if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
- list_del(&m->tcfm_list);
- if (m->tcfm_dev)
- dev_put(m->tcfm_dev);
- tcf_hash_destroy(&m->common, &mirred_hash_info);
- return 1;
- }
- }
- return 0;
+ struct tcf_mirred *m = to_mirred(a);
+ list_del(&m->tcfm_list);
+ if (m->tcfm_dev)
+ dev_put(m->tcfm_dev);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -61,7 +51,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_MIRRED_MAX + 1];
struct tc_mirred *parm;
struct tcf_mirred *m;
- struct tcf_common *pc;
struct net_device *dev;
int ret, ok_push = 0;
@@ -101,21 +90,20 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
dev = NULL;
}
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
+ if (!tcf_hash_check(parm->index, a, bind)) {
if (dev == NULL)
return -EINVAL;
- pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (!ovr) {
- tcf_mirred_release(to_mirred(pc), bind);
+ tcf_hash_release(a, bind);
return -EEXIST;
}
}
- m = to_mirred(pc);
+ m = to_mirred(a);
spin_lock_bh(&m->tcf_lock);
m->tcf_action = parm->action;
@@ -131,21 +119,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
spin_unlock_bh(&m->tcf_lock);
if (ret == ACT_P_CREATED) {
list_add(&m->tcfm_list, &mirred_list);
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
}
return ret;
}
-static int tcf_mirred_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_mirred *m = a->priv;
-
- if (m)
- return tcf_mirred_release(m, bind);
- return 0;
-}
-
static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -254,12 +233,11 @@ static struct notifier_block mirred_device_notifier = {
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
- .hinfo = &mirred_hash_info,
.type = TCA_ACT_MIRRED,
.owner = THIS_MODULE,
.act = tcf_mirred,
.dump = tcf_mirred_dump,
- .cleanup = tcf_mirred_cleanup,
+ .cleanup = tcf_mirred_release,
.init = tcf_mirred_init,
};
@@ -273,19 +251,13 @@ static int __init mirred_init_module(void)
if (err)
return err;
- err = tcf_hashinfo_init(&mirred_hash_info, MIRRED_TAB_MASK);
- if (err) {
- unregister_netdevice_notifier(&mirred_device_notifier);
- return err;
- }
pr_info("Mirror/redirect action on\n");
- return tcf_register_action(&act_mirred_ops);
+ return tcf_register_action(&act_mirred_ops, MIRRED_TAB_MASK);
}
static void __exit mirred_cleanup_module(void)
{
tcf_unregister_action(&act_mirred_ops);
- tcf_hashinfo_destroy(&mirred_hash_info);
unregister_netdevice_notifier(&mirred_device_notifier);
}
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 81f0404bb335..270a030d5fd0 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -31,8 +31,6 @@
#define NAT_TAB_MASK 15
-static struct tcf_hashinfo nat_hash_info;
-
static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
[TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
};
@@ -44,7 +42,6 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_nat *parm;
int ret = 0, err;
struct tcf_nat *p;
- struct tcf_common *pc;
if (nla == NULL)
return -EINVAL;
@@ -57,20 +54,19 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return -EINVAL;
parm = nla_data(tb[TCA_NAT_PARMS]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind)
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- p = to_tcf_nat(pc);
+ p = to_tcf_nat(a);
spin_lock_bh(&p->tcf_lock);
p->old_addr = parm->old_addr;
@@ -82,18 +78,11 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
spin_unlock_bh(&p->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_nat_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_nat *p = a->priv;
-
- return tcf_hash_release(&p->common, bind, &nat_hash_info);
-}
-
static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -293,12 +282,10 @@ nla_put_failure:
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
- .hinfo = &nat_hash_info,
.type = TCA_ACT_NAT,
.owner = THIS_MODULE,
.act = tcf_nat,
.dump = tcf_nat_dump,
- .cleanup = tcf_nat_cleanup,
.init = tcf_nat_init,
};
@@ -307,16 +294,12 @@ MODULE_LICENSE("GPL");
static int __init nat_init_module(void)
{
- int err = tcf_hashinfo_init(&nat_hash_info, NAT_TAB_MASK);
- if (err)
- return err;
- return tcf_register_action(&act_nat_ops);
+ return tcf_register_action(&act_nat_ops, NAT_TAB_MASK);
}
static void __exit nat_cleanup_module(void)
{
tcf_unregister_action(&act_nat_ops);
- tcf_hashinfo_destroy(&nat_hash_info);
}
module_init(nat_init_module);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index be3f0f6875bb..5f9bcb2e080b 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -25,8 +25,6 @@
#define PEDIT_TAB_MASK 15
-static struct tcf_hashinfo pedit_hash_info;
-
static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
[TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
};
@@ -39,7 +37,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct tc_pedit *parm;
int ret = 0, err;
struct tcf_pedit *p;
- struct tcf_common *pc;
struct tc_pedit_key *keys = NULL;
int ksize;
@@ -57,26 +54,22 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
return -EINVAL;
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
+ if (!tcf_hash_check(parm->index, a, bind)) {
if (!parm->nkeys)
return -EINVAL;
- pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
- p = to_pedit(pc);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ if (ret)
+ return ret;
+ p = to_pedit(a);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ tcf_hash_cleanup(a, est);
return -ENOMEM;
}
ret = ACT_P_CREATED;
} else {
- p = to_pedit(pc);
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ p = to_pedit(a);
+ tcf_hash_release(a, bind);
if (bind)
return 0;
if (!ovr)
@@ -100,22 +93,15 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
memcpy(p->tcfp_keys, parm->keys, ksize);
spin_unlock_bh(&p->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_pedit_cleanup(struct tc_action *a, int bind)
+static void tcf_pedit_cleanup(struct tc_action *a, int bind)
{
struct tcf_pedit *p = a->priv;
-
- if (p) {
- struct tc_pedit_key *keys = p->tcfp_keys;
- if (tcf_hash_release(&p->common, bind, &pedit_hash_info)) {
- kfree(keys);
- return 1;
- }
- }
- return 0;
+ struct tc_pedit_key *keys = p->tcfp_keys;
+ kfree(keys);
}
static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
@@ -230,7 +216,6 @@ nla_put_failure:
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
- .hinfo = &pedit_hash_info,
.type = TCA_ACT_PEDIT,
.owner = THIS_MODULE,
.act = tcf_pedit,
@@ -245,15 +230,11 @@ MODULE_LICENSE("GPL");
static int __init pedit_init_module(void)
{
- int err = tcf_hashinfo_init(&pedit_hash_info, PEDIT_TAB_MASK);
- if (err)
- return err;
- return tcf_register_action(&act_pedit_ops);
+ return tcf_register_action(&act_pedit_ops, PEDIT_TAB_MASK);
}
static void __exit pedit_cleanup_module(void)
{
- tcf_hashinfo_destroy(&pedit_hash_info);
tcf_unregister_action(&act_pedit_ops);
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 1778209a332f..0566e4606a4a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -41,7 +41,6 @@ struct tcf_police {
container_of(pc, struct tcf_police, common)
#define POL_TAB_MASK 15
-static struct tcf_hashinfo police_hash_info;
/* old policer structure from before tc actions */
struct tc_police_compat {
@@ -234,7 +233,7 @@ override:
police->tcfp_t_c = ktime_to_ns(ktime_get());
police->tcf_index = parm->index ? parm->index :
- tcf_hash_new_index(a->ops->hinfo);
+ tcf_hash_new_index(hinfo);
h = tcf_hash(police->tcf_index, POL_TAB_MASK);
spin_lock_bh(&hinfo->lock);
hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
@@ -253,14 +252,6 @@ failure:
return err;
}
-static int tcf_act_police_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_police *p = a->priv;
- if (p)
- return tcf_hash_release(&p->common, bind, &police_hash_info);
- return 0;
-}
-
static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -357,12 +348,10 @@ MODULE_LICENSE("GPL");
static struct tc_action_ops act_police_ops = {
.kind = "police",
- .hinfo = &police_hash_info,
.type = TCA_ID_POLICE,
.owner = THIS_MODULE,
.act = tcf_act_police,
.dump = tcf_act_police_dump,
- .cleanup = tcf_act_police_cleanup,
.init = tcf_act_police_locate,
.walk = tcf_act_police_walker
};
@@ -370,19 +359,12 @@ static struct tc_action_ops act_police_ops = {
static int __init
police_init_module(void)
{
- int err = tcf_hashinfo_init(&police_hash_info, POL_TAB_MASK);
- if (err)
- return err;
- err = tcf_register_action(&act_police_ops);
- if (err)
- tcf_hashinfo_destroy(&police_hash_info);
- return err;
+ return tcf_register_action(&act_police_ops, POL_TAB_MASK);
}
static void __exit
police_cleanup_module(void)
{
- tcf_hashinfo_destroy(&police_hash_info);
tcf_unregister_action(&act_police_ops);
}
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 8ef2f1fcbfba..992c2317ce88 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -25,7 +25,6 @@
#include <net/tc_act/tc_defact.h>
#define SIMP_TAB_MASK 7
-static struct tcf_hashinfo simp_hash_info;
#define SIMP_MAX_DATA 32
static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
@@ -47,20 +46,10 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
return d->tcf_action;
}
-static int tcf_simp_release(struct tcf_defact *d, int bind)
+static void tcf_simp_release(struct tc_action *a, int bind)
{
- int ret = 0;
- if (d) {
- if (bind)
- d->tcf_bindcnt--;
- d->tcf_refcnt--;
- if (d->tcf_bindcnt <= 0 && d->tcf_refcnt <= 0) {
- kfree(d->tcfd_defdata);
- tcf_hash_destroy(&d->common, &simp_hash_info);
- ret = 1;
- }
- }
- return ret;
+ struct tcf_defact *d = to_defact(a);
+ kfree(d->tcfd_defdata);
}
static int alloc_defdata(struct tcf_defact *d, char *defdata)
@@ -94,7 +83,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_DEF_MAX + 1];
struct tc_defact *parm;
struct tcf_defact *d;
- struct tcf_common *pc;
char *defdata;
int ret = 0, err;
@@ -114,29 +102,25 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_DEF_PARMS]);
defdata = nla_data(tb[TCA_DEF_DATA]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ if (ret)
+ return ret;
- d = to_defact(pc);
+ d = to_defact(a);
ret = alloc_defdata(d, defdata);
if (ret < 0) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ tcf_hash_cleanup(a, est);
return ret;
}
d->tcf_action = parm->action;
ret = ACT_P_CREATED;
} else {
- d = to_defact(pc);
+ d = to_defact(a);
if (bind)
return 0;
- tcf_simp_release(d, bind);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
@@ -144,19 +128,10 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
}
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_simp_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_defact *d = a->priv;
-
- if (d)
- return tcf_simp_release(d, bind);
- return 0;
-}
-
static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
@@ -187,12 +162,11 @@ nla_put_failure:
static struct tc_action_ops act_simp_ops = {
.kind = "simple",
- .hinfo = &simp_hash_info,
.type = TCA_ACT_SIMP,
.owner = THIS_MODULE,
.act = tcf_simp,
.dump = tcf_simp_dump,
- .cleanup = tcf_simp_cleanup,
+ .cleanup = tcf_simp_release,
.init = tcf_simp_init,
};
@@ -202,23 +176,15 @@ MODULE_LICENSE("GPL");
static int __init simp_init_module(void)
{
- int err, ret;
- err = tcf_hashinfo_init(&simp_hash_info, SIMP_TAB_MASK);
- if (err)
- return err;
-
- ret = tcf_register_action(&act_simp_ops);
+ int ret;
+ ret = tcf_register_action(&act_simp_ops, SIMP_TAB_MASK);
if (!ret)
pr_info("Simple TC action Loaded\n");
- else
- tcf_hashinfo_destroy(&simp_hash_info);
-
return ret;
}
static void __exit simp_cleanup_module(void)
{
- tcf_hashinfo_destroy(&simp_hash_info);
tcf_unregister_action(&act_simp_ops);
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 98725080b5aa..fcfeeaf838be 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -28,7 +28,6 @@
#include <net/tc_act/tc_skbedit.h>
#define SKBEDIT_TAB_MASK 15
-static struct tcf_hashinfo skbedit_hash_info;
static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
@@ -65,7 +64,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tc_skbedit *parm;
struct tcf_skbedit *d;
- struct tcf_common *pc;
u32 flags = 0, *priority = NULL, *mark = NULL;
u16 *queue_mapping = NULL;
int ret = 0, err;
@@ -100,19 +98,18 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ if (ret)
+ return ret;
- d = to_skbedit(pc);
+ d = to_skbedit(a);
ret = ACT_P_CREATED;
} else {
- d = to_skbedit(pc);
+ d = to_skbedit(a);
if (bind)
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
@@ -132,19 +129,10 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
spin_unlock_bh(&d->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_skbedit *d = a->priv;
-
- if (d)
- return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
- return 0;
-}
-
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
@@ -186,12 +174,10 @@ nla_put_failure:
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
- .hinfo = &skbedit_hash_info,
.type = TCA_ACT_SKBEDIT,
.owner = THIS_MODULE,
.act = tcf_skbedit,
.dump = tcf_skbedit_dump,
- .cleanup = tcf_skbedit_cleanup,
.init = tcf_skbedit_init,
};
@@ -201,15 +187,11 @@ MODULE_LICENSE("GPL");
static int __init skbedit_init_module(void)
{
- int err = tcf_hashinfo_init(&skbedit_hash_info, SKBEDIT_TAB_MASK);
- if (err)
- return err;
- return tcf_register_action(&act_skbedit_ops);
+ return tcf_register_action(&act_skbedit_ops, SKBEDIT_TAB_MASK);
}
static void __exit skbedit_cleanup_module(void)
{
- tcf_hashinfo_destroy(&skbedit_hash_info);
tcf_unregister_action(&act_skbedit_ops);
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index de1059af6da1..f1669a00f571 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -117,6 +117,11 @@ struct netem_sched_data {
LOST_IN_BURST_PERIOD,
} _4_state_model;
+ enum {
+ GOOD_STATE = 1,
+ BAD_STATE,
+ } GE_state_model;
+
/* Correlated Loss Generation models */
struct clgstate {
/* state of the Markov chain */
@@ -272,15 +277,15 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
struct clgstate *clg = &q->clg;
switch (clg->state) {
- case 1:
+ case GOOD_STATE:
if (prandom_u32() < clg->a1)
- clg->state = 2;
+ clg->state = BAD_STATE;
if (prandom_u32() < clg->a4)
return true;
break;
- case 2:
+ case BAD_STATE:
if (prandom_u32() < clg->a2)
- clg->state = 1;
+ clg->state = GOOD_STATE;
if (prandom_u32() > clg->a3)
return true;
}
@@ -689,9 +694,8 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
return 0;
}
-static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
+static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_corr *c = nla_data(attr);
init_crandom(&q->delay_cor, c->delay_corr);
@@ -699,27 +703,24 @@ static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
init_crandom(&q->dup_cor, c->dup_corr);
}
-static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
+static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_reorder *r = nla_data(attr);
q->reorder = r->probability;
init_crandom(&q->reorder_cor, r->correlation);
}
-static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
+static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_corrupt *r = nla_data(attr);
q->corrupt = r->probability;
init_crandom(&q->corrupt_cor, r->correlation);
}
-static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_rate *r = nla_data(attr);
q->rate = r->rate;
@@ -732,9 +733,8 @@ static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
-static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct nlattr *la;
int rem;
@@ -752,7 +752,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
q->loss_model = CLG_4_STATES;
- q->clg.state = 1;
+ q->clg.state = TX_IN_GAP_PERIOD;
q->clg.a1 = gi->p13;
q->clg.a2 = gi->p31;
q->clg.a3 = gi->p32;
@@ -770,7 +770,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
}
q->loss_model = CLG_GILB_ELL;
- q->clg.state = 1;
+ q->clg.state = GOOD_STATE;
q->clg.a1 = ge->p;
q->clg.a2 = ge->r;
q->clg.a3 = ge->h;
@@ -821,6 +821,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
struct netem_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_NETEM_MAX + 1];
struct tc_netem_qopt *qopt;
+ struct clgstate old_clg;
+ int old_loss_model = CLG_RANDOM;
int ret;
if (opt == NULL)
@@ -831,6 +833,33 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (ret < 0)
return ret;
+ /* back up q->clg and q->loss_model */
+ old_clg = q->clg;
+ old_loss_model = q->loss_model;
+
+ if (tb[TCA_NETEM_LOSS]) {
+ ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
+ if (ret) {
+ q->loss_model = old_loss_model;
+ return ret;
+ }
+ } else {
+ q->loss_model = CLG_RANDOM;
+ }
+
+ if (tb[TCA_NETEM_DELAY_DIST]) {
+ ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
+ if (ret) {
+ /* recover clg and loss_model, in case
+ * q->clg and q->loss_model were modified
+ * in get_loss_clg()
+ */
+ q->clg = old_clg;
+ q->loss_model = old_loss_model;
+ return ret;
+ }
+ }
+
sch->limit = qopt->limit;
q->latency = qopt->latency;
@@ -848,22 +877,16 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
q->reorder = ~0;
if (tb[TCA_NETEM_CORR])
- get_correlation(sch, tb[TCA_NETEM_CORR]);
-
- if (tb[TCA_NETEM_DELAY_DIST]) {
- ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
- if (ret)
- return ret;
- }
+ get_correlation(q, tb[TCA_NETEM_CORR]);
if (tb[TCA_NETEM_REORDER])
- get_reorder(sch, tb[TCA_NETEM_REORDER]);
+ get_reorder(q, tb[TCA_NETEM_REORDER]);
if (tb[TCA_NETEM_CORRUPT])
- get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
+ get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
if (tb[TCA_NETEM_RATE])
- get_rate(sch, tb[TCA_NETEM_RATE]);
+ get_rate(q, tb[TCA_NETEM_RATE]);
if (tb[TCA_NETEM_RATE64])
q->rate = max_t(u64, q->rate,
@@ -872,10 +895,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_NETEM_ECN])
q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
- q->loss_model = CLG_RANDOM;
- if (tb[TCA_NETEM_LOSS])
- ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
-
return ret;
}
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a255d0200a59..fefeeb73f15f 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -15,6 +15,11 @@
*
* ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
* University of Oslo, Norway.
+ *
+ * References:
+ * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
+ * IEEE Conference on High Performance Switching and Routing 2013:
+ * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem"
*/
#include <linux/module.h>
@@ -36,7 +41,7 @@ struct pie_params {
psched_time_t target; /* user specified target delay in pschedtime */
u32 tupdate; /* timer frequency (in jiffies) */
u32 limit; /* number of packets that can be enqueued */
- u32 alpha; /* alpha and beta are between -4 and 4 */
+ u32 alpha; /* alpha and beta are between 0 and 32 */
u32 beta; /* and are used for shift relative to 1 */
bool ecn; /* true if ecn is enabled */
bool bytemode; /* to scale drop early prob based on pkt size */
@@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch)
if (qdelay == 0 && qlen != 0)
update_prob = false;
- /* Add ranges for alpha and beta, more aggressive for high dropping
- * mode and gentle steps for light dropping mode
- * In light dropping mode, take gentle steps; in medium dropping mode,
- * take medium steps; in high dropping mode, take big steps.
+ /* In the algorithm, alpha and beta are between 0 and 2, with a typical
+ * value for alpha of 0.125. In this implementation, we use values 0-32
+ * passed from user space to represent this range. Also, alpha and beta
+ * have units of HZ and need to be scaled before they can be used to
+ * update the probability. alpha/beta are updated locally below by
+ * 1) scaling them appropriately and 2) scaling them down by 16 to come
+ * to the 0-2 range. Please see the paper for details.
+ *
+ * We scale alpha and beta differently depending on whether we are in
+ * light, medium or high dropping mode.
*/
if (q->vars.prob < MAX_PROB / 100) {
alpha =
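To make the rewritten scaling comment above concrete (numbers chosen only to illustrate the encoding it describes): with the 0-32 user-space range standing in for the algorithm's 0-2, a configured alpha of 2 corresponds to 2/16 = 0.125, the typical value the comment cites, and the maximum of 32 maps to 32/16 = 2.0, the top of the algorithm's range.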
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae609200674..f558433537b8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
return false;
}
-/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+/* Update asoc's rwnd for the approximated state in the buffer,
+ * and check whether SACK needs to be sent.
+ */
+void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
{
+ int rx_count;
struct sctp_chunk *sack;
struct timer_list *timer;
- if (asoc->rwnd_over) {
- if (asoc->rwnd_over >= len) {
- asoc->rwnd_over -= len;
- } else {
- asoc->rwnd += (len - asoc->rwnd_over);
- asoc->rwnd_over = 0;
- }
- } else {
- asoc->rwnd += len;
- }
+ if (asoc->ep->rcvbuf_policy)
+ rx_count = atomic_read(&asoc->rmem_alloc);
+ else
+ rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
- /* If we had window pressure, start recovering it
- * once our rwnd had reached the accumulated pressure
- * threshold. The idea is to recover slowly, but up
- * to the initial advertised window.
- */
- if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
- int change = min(asoc->pathmtu, asoc->rwnd_press);
- asoc->rwnd += change;
- asoc->rwnd_press -= change;
- }
+ if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
+ asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
+ else
+ asoc->rwnd = 0;
- pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
- __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
- asoc->a_rwnd);
+ pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
+ __func__, asoc, asoc->rwnd, rx_count,
+ asoc->base.sk->sk_rcvbuf);
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
* The algorithm used is similar to the one described in
* Section 4.2.3.3 of RFC 1122.
*/
- if (sctp_peer_needs_update(asoc)) {
+ if (update_peer && sctp_peer_needs_update(asoc)) {
asoc->a_rwnd = asoc->rwnd;
pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
}
}
-/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
-{
- int rx_count;
- int over = 0;
-
- if (unlikely(!asoc->rwnd || asoc->rwnd_over))
- pr_debug("%s: association:%p has asoc->rwnd:%u, "
- "asoc->rwnd_over:%u!\n", __func__, asoc,
- asoc->rwnd, asoc->rwnd_over);
-
- if (asoc->ep->rcvbuf_policy)
- rx_count = atomic_read(&asoc->rmem_alloc);
- else
- rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
-
- /* If we've reached or overflowed our receive buffer, announce
- * a 0 rwnd if rwnd would still be positive. Store the
- * the potential pressure overflow so that the window can be restored
- * back to original value.
- */
- if (rx_count >= asoc->base.sk->sk_rcvbuf)
- over = 1;
-
- if (asoc->rwnd >= len) {
- asoc->rwnd -= len;
- if (over) {
- asoc->rwnd_press += asoc->rwnd;
- asoc->rwnd = 0;
- }
- } else {
- asoc->rwnd_over = len - asoc->rwnd;
- asoc->rwnd = 0;
- }
-
- pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
- __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
- asoc->rwnd_press);
-}
/* Build the bind address list for the association based on info from the
* local endpoint and the remote peer.
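The replacement sctp_assoc_rwnd_update() above derives the window directly from receive-buffer occupancy instead of tracking per-chunk increments and decrements: rwnd becomes (sk_rcvbuf - rx_count) >> 1 when that difference is positive, and 0 otherwise. For instance (figures chosen purely for illustration), with sk_rcvbuf = 200000 bytes and 40000 bytes currently charged against the buffer, the advertised window works out to (200000 - 40000) >> 1 = 80000 bytes.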
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 483dcd71b3c5..591b44d3b7de 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6176,7 +6176,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* PMTU. In cases, such as loopback, this might be a rather
* large spill over.
*/
- if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+ if ((!chunk->data_accepted) && (!asoc->rwnd ||
(datalen > asoc->rwnd + asoc->frag_point))) {
/* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e91d6e5df63..981aaf8b6ace 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -64,6 +64,7 @@
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/file.h>
+#include <linux/compat.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk,
/*
* New (hopefully final) interface for the API.
* We use the sctp_getaddrs_old structure so that user-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
* is that we store the actual length of the address buffer into the
- * addrs_num structure member. That way we can re-use the existing
+ * addrs_num structure member. That way we can re-use the existing
* code.
*/
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+ sctp_assoc_t assoc_id;
+ s32 addr_num;
+ compat_uptr_t addrs; /* struct sockaddr * */
+};
+#endif
+
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
sctp_assoc_t assoc_id = 0;
int err = 0;
- if (len < sizeof(param))
- return -EINVAL;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct compat_sctp_getaddrs_old param32;
- if (copy_from_user(&param, optval, sizeof(param)))
- return -EFAULT;
+ if (len < sizeof(param32))
+ return -EINVAL;
+ if (copy_from_user(&param32, optval, sizeof(param32)))
+ return -EFAULT;
- err = __sctp_setsockopt_connectx(sk,
- (struct sockaddr __user *)param.addrs,
- param.addr_num, &assoc_id);
+ param.assoc_id = param32.assoc_id;
+ param.addr_num = param32.addr_num;
+ param.addrs = compat_ptr(param32.addrs);
+ } else
+#endif
+ {
+ if (len < sizeof(param))
+ return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+ }
+ err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+ param.addrs, param.addr_num,
+ &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
return -EFAULT;
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
sctp_skb_pull(skb, copied);
skb_queue_head(&sk->sk_receive_queue, skb);
- /* When only partial message is copied to the user, increase
- * rwnd by that amount. If all the data in the skb is read,
- * rwnd is updated when the event is freed.
- */
- if (!sctp_ulpevent_is_notification(event))
- sctp_assoc_rwnd_increase(event->asoc, copied);
goto out;
} else if ((event->msg_flags & MSG_NOTIFICATION) ||
(event->msg_flags & MSG_EOR))
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7135e617ab0f..35c8923b5554 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = {
},
{
.procname = "cookie_hmac_alg",
+ .data = &init_net.sctp.sctp_hmac_alg,
.maxlen = 8,
.mode = 0644,
.proc_handler = proc_sctp_do_hmac_alg,
@@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
int sctp_sysctl_net_register(struct net *net)
{
- struct ctl_table *table;
- int i;
+ struct ctl_table *table = sctp_net_table;
+
+ if (!net_eq(net, &init_net)) {
+ int i;
- table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
+ table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
- for (i = 0; table[i].data; i++)
- table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+ for (i = 0; table[i].data; i++)
+ table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+ }
net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
return 0;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d0810dc5f079..1d348d15b33d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -652,5 +652,4 @@ void sctp_transport_immediate_rtx(struct sctp_transport *t)
if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
sctp_transport_hold(t);
}
- return;
}
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..8d198ae03606 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
skb = sctp_event2skb(event);
/* Set the owner and charge rwnd for bytes received. */
sctp_ulpevent_set_owner(event, asoc);
- sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
+ sctp_assoc_rwnd_update(asoc, false);
if (!skb->data_len)
return;
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
{
struct sk_buff *skb, *frag;
unsigned int len;
+ struct sctp_association *asoc;
/* Current stack structures assume that the rcv buffer is
* per socket. For UDP style sockets this is not true as
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
}
done:
- sctp_assoc_rwnd_increase(event->asoc, len);
+ asoc = event->asoc;
+ sctp_association_hold(asoc);
sctp_ulpevent_release_owner(event);
+ sctp_assoc_rwnd_update(asoc, true);
+ sctp_association_put(asoc);
}
static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
diff --git a/net/socket.c b/net/socket.c
index 879933aaed4c..840cffb7119b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -593,7 +593,7 @@ void sock_release(struct socket *sock)
}
if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
- printk(KERN_ERR "sock_release: fasync list not empty!\n");
+ pr_err("%s: fasync list not empty!\n", __func__);
if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
return;
@@ -1265,8 +1265,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
static int warned;
if (!warned) {
warned = 1;
- printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
- current->comm);
+ pr_info("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
+ current->comm);
}
family = PF_PACKET;
}
@@ -2595,8 +2595,7 @@ int sock_register(const struct net_proto_family *ops)
int err;
if (ops->family >= NPROTO) {
- printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
- NPROTO);
+ pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO);
return -ENOBUFS;
}
@@ -2610,7 +2609,7 @@ int sock_register(const struct net_proto_family *ops)
}
spin_unlock(&net_family_lock);
- printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
+ pr_info("NET: Registered protocol family %d\n", ops->family);
return err;
}
EXPORT_SYMBOL(sock_register);
@@ -2638,7 +2637,7 @@ void sock_unregister(int family)
synchronize_rcu();
- printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
+ pr_info("NET: Unregistered protocol family %d\n", family);
}
EXPORT_SYMBOL(sock_unregister);
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 60b00ab93d74..a74acf9ee804 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,6 +37,8 @@
#ifndef _TIPC_ADDR_H
#define _TIPC_ADDR_H
+#include "core.h"
+
#define TIPC_ZONE_MASK 0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index bf860d9e75af..e0feb7ef1469 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -356,9 +356,9 @@ static void bclink_peek_nack(struct tipc_msg *msg)
}
/*
- * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
+ * tipc_bclink_xmit - broadcast a packet to all nodes in cluster
*/
-int tipc_bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff *buf)
{
int res;
@@ -370,7 +370,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
goto exit;
}
- res = tipc_link_send_buf(bcl, buf);
+ res = __tipc_link_xmit(bcl, buf);
if (likely(res >= 0)) {
bclink_set_last_sent();
bcl->stats.queue_sz_counts++;
@@ -399,19 +399,18 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
*/
if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
- tipc_link_send_proto_msg(
- node->active_links[node->addr & 1],
- STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(node->active_links[node->addr & 1],
+ STATE_MSG, 0, 0, 0, 0, 0);
bcl->stats.sent_acks++;
}
}
/**
- * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
*
* tipc_net_lock is read_locked, no other locks set
*/
-void tipc_bclink_recv_pkt(struct sk_buff *buf)
+void tipc_bclink_rcv(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
struct tipc_node *node;
@@ -468,7 +467,7 @@ receive:
spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
if (likely(msg_mcast(msg)))
- tipc_port_recv_mcast(buf, NULL);
+ tipc_port_mcast_rcv(buf, NULL);
else
kfree_skb(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
@@ -478,12 +477,12 @@ receive:
bcl->stats.recv_bundled += msg_msgcnt(msg);
spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
- tipc_link_recv_bundle(buf);
+ tipc_link_bundle_rcv(buf);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
int ret;
- ret = tipc_link_recv_fragment(&node->bclink.reasm_head,
- &node->bclink.reasm_tail,
- &buf);
+ ret = tipc_link_frag_rcv(&node->bclink.reasm_head,
+ &node->bclink.reasm_tail,
+ &buf);
if (ret == LINK_REASM_ERROR)
goto unlock;
spin_lock_bh(&bc_lock);
@@ -503,7 +502,7 @@ receive:
bclink_accept_pkt(node, seqno);
spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
- tipc_named_recv(buf);
+ tipc_named_rcv(buf);
} else {
spin_lock_bh(&bc_lock);
bclink_accept_pkt(node, seqno);
@@ -785,7 +784,6 @@ void tipc_bclink_init(void)
bcl->owner = &bclink->node;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
- spin_lock_init(&bcbearer->bearer.lock);
bcl->b_ptr = &bcbearer->bearer;
bcl->state = WORKING_WORKING;
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 6ee587b469fd..a80ef54b818e 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -90,8 +90,8 @@ void tipc_bclink_add_node(u32 addr);
void tipc_bclink_remove_node(u32 addr);
struct tipc_node *tipc_bclink_retransmit_to(void);
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-int tipc_bclink_send_msg(struct sk_buff *buf);
-void tipc_bclink_recv_pkt(struct sk_buff *buf);
+int tipc_bclink_xmit(struct sk_buff *buf);
+void tipc_bclink_rcv(struct sk_buff *buf);
u32 tipc_bclink_get_last_sent(void);
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a38c89969c68..242cddd35a47 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -51,7 +51,7 @@ static struct tipc_media * const media_info_array[] = {
struct tipc_bearer tipc_bearers[MAX_BEARERS];
-static void bearer_disable(struct tipc_bearer *b_ptr);
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
/**
* tipc_media_find - locates specified media object by name
@@ -327,12 +327,10 @@ restart:
b_ptr->net_plane = bearer_id + 'A';
b_ptr->active = 1;
b_ptr->priority = priority;
- INIT_LIST_HEAD(&b_ptr->links);
- spin_lock_init(&b_ptr->lock);
res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr, disc_domain);
if (res) {
- bearer_disable(b_ptr);
+ bearer_disable(b_ptr, false);
pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
name);
goto exit;
@@ -350,20 +348,9 @@ exit:
*/
static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
{
- struct tipc_link *l_ptr;
- struct tipc_link *temp_l_ptr;
-
read_lock_bh(&tipc_net_lock);
pr_info("Resetting bearer <%s>\n", b_ptr->name);
- spin_lock_bh(&b_ptr->lock);
- list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
- struct tipc_node *n_ptr = l_ptr->owner;
-
- spin_lock_bh(&n_ptr->lock);
- tipc_link_reset(l_ptr);
- spin_unlock_bh(&n_ptr->lock);
- }
- spin_unlock_bh(&b_ptr->lock);
+ tipc_link_reset_list(b_ptr->identity);
read_unlock_bh(&tipc_net_lock);
return 0;
}
@@ -373,25 +360,14 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
*
* Note: This routine assumes caller holds tipc_net_lock.
*/
-static void bearer_disable(struct tipc_bearer *b_ptr)
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
{
- struct tipc_link *l_ptr;
- struct tipc_link *temp_l_ptr;
- struct tipc_link_req *temp_req;
-
pr_info("Disabling bearer <%s>\n", b_ptr->name);
- spin_lock_bh(&b_ptr->lock);
b_ptr->media->disable_media(b_ptr);
- list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
- tipc_link_delete(l_ptr);
- }
- temp_req = b_ptr->link_req;
- b_ptr->link_req = NULL;
- spin_unlock_bh(&b_ptr->lock);
-
- if (temp_req)
- tipc_disc_delete(temp_req);
+ tipc_link_delete_list(b_ptr->identity, shutting_down);
+ if (b_ptr->link_req)
+ tipc_disc_delete(b_ptr->link_req);
memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
@@ -406,7 +382,7 @@ int tipc_disable_bearer(const char *name)
pr_warn("Attempt to disable unknown bearer <%s>\n", name);
res = -EINVAL;
} else {
- bearer_disable(b_ptr);
+ bearer_disable(b_ptr, false);
res = 0;
}
write_unlock_bh(&tipc_net_lock);
@@ -626,6 +602,6 @@ void tipc_bearer_stop(void)
for (i = 0; i < MAX_BEARERS; i++) {
if (tipc_bearers[i].active)
- bearer_disable(&tipc_bearers[i]);
+ bearer_disable(&tipc_bearers[i], true);
}
}
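Read together, the hunks above leave the disable path looking roughly as follows: the per-bearer link walk under b_ptr->lock is gone, and link teardown is driven by bearer identity instead. This is a condensed after-view assembled from the +/- lines, not a verbatim copy of the resulting file.

/* bearer_disable() after this patch, condensed: */
static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
{
	pr_info("Disabling bearer <%s>\n", b_ptr->name);
	b_ptr->media->disable_media(b_ptr);
	tipc_link_delete_list(b_ptr->identity, shutting_down);
	if (b_ptr->link_req)
		tipc_disc_delete(b_ptr->link_req);
	memset(b_ptr, 0, sizeof(struct tipc_bearer));
}

Administrative disable and the enable-failure path pass shutting_down = false; only tipc_bearer_stop() passes true, so links are freed immediately at module shutdown.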
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 4f5db9ad5bf6..425dd8107a8f 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -107,10 +107,8 @@ struct tipc_media {
/**
* struct tipc_bearer - Generic TIPC bearer structure
- * @dev: ptr to associated network device
- * @usr_handle: pointer to additional media-specific information about bearer
+ * @media_ptr: pointer to additional media-specific information about bearer
* @mtu: max packet size bearer can support
- * @lock: spinlock for controlling access to bearer
* @addr: media-specific address associated with bearer
* @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
@@ -120,7 +118,6 @@ struct tipc_media {
* @tolerance: default link tolerance for bearer
* @identity: array index of this bearer within TIPC bearer array
* @link_req: ptr to (optional) structure making periodic link setup requests
- * @links: list of non-congested links associated with bearer
* @active: non-zero if bearer structure represents a bearer

* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @nodes: indicates which nodes in cluster can be reached through bearer
@@ -134,7 +131,6 @@ struct tipc_bearer {
u32 mtu; /* initialized by media */
struct tipc_media_addr addr; /* initialized by media */
char name[TIPC_MAX_BEARER_NAME];
- spinlock_t lock;
struct tipc_media *media;
struct tipc_media_addr bcast_addr;
u32 priority;
@@ -142,7 +138,6 @@ struct tipc_bearer {
u32 tolerance;
u32 identity;
struct tipc_link_req *link_req;
- struct list_head links;
int active;
char net_plane;
struct tipc_node_map nodes;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f9e88d8b04ca..3f76b98d2fed 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -1,7 +1,7 @@
/*
* net/tipc/core.c: TIPC module code
*
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2013, Ericsson AB
* Copyright (c) 2005-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1ff477b0450d..5569d96b4da3 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -192,6 +192,7 @@ static inline void k_term_timer(struct timer_list *timer)
struct tipc_skb_cb {
void *handle;
+ bool deferred;
};
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
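The new deferred bit marks buffers that have already passed header validation once, so the link layer can skip re-validating them when they come back off the deferred queue. The intended set/check pattern, assembled from the link.c hunks later in this patch:

/* When parking an out-of-sequence buffer (link_handle_out_of_seq_msg): */
if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
			&l_ptr->newest_deferred_in, buf)) {
	TIPC_SKB_CB(buf)->deferred = true;
	l_ptr->deferred_inqueue_sz++;
}

/* When the buffer is re-processed (link_recv_buf_validate): */
if (unlikely(TIPC_SKB_CB(buf)->deferred))
	return 1;	/* already validated on first reception */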
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 412ff41b8611..fa94da6db3d4 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -110,11 +110,11 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
}
/**
- * tipc_disc_recv_msg - handle incoming link setup message (request or response)
+ * tipc_disc_rcv - handle incoming link setup message (request or response)
* @buf: buffer containing message
* @b_ptr: bearer that message arrived on
*/
-void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
struct tipc_node *n_ptr;
struct tipc_link *link;
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 75b67c403aa3..b4fc962c3623 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -44,6 +44,6 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
void tipc_disc_delete(struct tipc_link_req *req);
void tipc_disc_add_dest(struct tipc_link_req *req);
void tipc_disc_remove_dest(struct tipc_link_req *req);
-void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d4b5de41b682..d1a764b9b2d8 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -77,19 +77,19 @@ static const char *link_unk_evt = "Unknown link event ";
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf);
-static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
-static int link_send_sections_long(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destnode);
+static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
-static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
-static void tipc_link_send_sync(struct tipc_link *l);
-static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
+static void tipc_link_sync_xmit(struct tipc_link *l);
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
/*
* Simple link routines
@@ -147,11 +147,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
/**
* link_timeout - handle expiration of link timer
* @l_ptr: pointer to link
- *
- * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
- * with tipc_link_delete(). (There is no risk that the node will be deleted by
- * another thread because tipc_link_delete() always cancels the link timer before
- * tipc_node_delete() is called.)
*/
static void link_timeout(struct tipc_link *l_ptr)
{
@@ -213,8 +208,8 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time)
* Returns pointer to link.
*/
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
- struct tipc_bearer *b_ptr,
- const struct tipc_media_addr *media_addr)
+ struct tipc_bearer *b_ptr,
+ const struct tipc_media_addr *media_addr)
{
struct tipc_link *l_ptr;
struct tipc_msg *msg;
@@ -279,41 +274,43 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
k_init_timer(&l_ptr->timer, (Handler)link_timeout,
(unsigned long)l_ptr);
- list_add_tail(&l_ptr->link_list, &b_ptr->links);
link_state_event(l_ptr, STARTING_EVT);
return l_ptr;
}
-/**
- * tipc_link_delete - delete a link
- * @l_ptr: pointer to link
- *
- * Note: 'tipc_net_lock' is write_locked, bearer is locked.
- * This routine must not grab the node lock until after link timer cancellation
- * to avoid a potential deadlock situation.
- */
-void tipc_link_delete(struct tipc_link *l_ptr)
-{
- if (!l_ptr) {
- pr_err("Attempt to delete non-existent link\n");
- return;
- }
- k_cancel_timer(&l_ptr->timer);
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
+{
+ struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
- tipc_node_lock(l_ptr->owner);
- tipc_link_reset(l_ptr);
- tipc_node_detach_link(l_ptr->owner, l_ptr);
- tipc_link_purge_queues(l_ptr);
- list_del_init(&l_ptr->link_list);
- tipc_node_unlock(l_ptr->owner);
- k_term_timer(&l_ptr->timer);
- kfree(l_ptr);
+ list_for_each_entry(n_ptr, &tipc_node_list, list) {
+ spin_lock_bh(&n_ptr->lock);
+ l_ptr = n_ptr->links[bearer_id];
+ if (l_ptr) {
+ tipc_link_reset(l_ptr);
+ if (shutting_down || !tipc_node_is_up(n_ptr)) {
+ tipc_node_detach_link(l_ptr->owner, l_ptr);
+ tipc_link_reset_fragments(l_ptr);
+ spin_unlock_bh(&n_ptr->lock);
+
+ /* Nobody else can access this link now: */
+ del_timer_sync(&l_ptr->timer);
+ kfree(l_ptr);
+ } else {
+ /* Detach/delete when failover is finished: */
+ l_ptr->flags |= LINK_STOPPED;
+ spin_unlock_bh(&n_ptr->lock);
+ del_timer_sync(&l_ptr->timer);
+ }
+ continue;
+ }
+ spin_unlock_bh(&n_ptr->lock);
+ }
}
-
/**
* link_schedule_port - schedule port for deferred sending
* @l_ptr: pointer to link
@@ -461,6 +458,19 @@ void tipc_link_reset(struct tipc_link *l_ptr)
link_reset_statistics(l_ptr);
}
+void tipc_link_reset_list(unsigned int bearer_id)
+{
+ struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
+
+ list_for_each_entry(n_ptr, &tipc_node_list, list) {
+ spin_lock_bh(&n_ptr->lock);
+ l_ptr = n_ptr->links[bearer_id];
+ if (l_ptr)
+ tipc_link_reset(l_ptr);
+ spin_unlock_bh(&n_ptr->lock);
+ }
+}
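With the per-bearer link list and bearer lock gone, every operation that used to iterate b_ptr->links now walks the global node list and indexes node->links[bearer_id] under the node lock, exactly as tipc_link_delete_list() and tipc_link_reset_list() above do. A generic sketch of that common shape follows; the helper name and callback parameter are illustrative and not part of the patch.

/* Illustrative only: per-node walk replacing the old b_ptr->links list. */
static void for_each_link_on_bearer(unsigned int bearer_id,
				    void (*op)(struct tipc_link *l_ptr))
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		spin_lock_bh(&n_ptr->lock);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			op(l_ptr);
		spin_unlock_bh(&n_ptr->lock);
	}
}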
static void link_activate(struct tipc_link *l_ptr)
{
@@ -479,7 +489,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
struct tipc_link *other;
u32 cont_intv = l_ptr->continuity_interval;
- if (!l_ptr->started && (event != STARTING_EVT))
+ if (l_ptr->flags & LINK_STOPPED)
+ return;
+
+ if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
return; /* Not yet. */
/* Check whether changeover is going on */
@@ -499,12 +512,12 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
if (l_ptr->next_in_no != l_ptr->checkpoint) {
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
@@ -512,7 +525,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
}
l_ptr->state = WORKING_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
break;
@@ -522,7 +535,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -544,7 +558,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -554,14 +569,14 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->fsm_msg_cnt = 0;
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
@@ -570,8 +585,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, RESET_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, RESET_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
}
@@ -591,24 +606,25 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
- tipc_link_send_sync(l_ptr);
+ tipc_link_sync_xmit(l_ptr);
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
case STARTING_EVT:
- l_ptr->started = 1;
+ l_ptr->flags |= LINK_STARTED;
/* fall through */
case TIMEOUT_EVT:
- tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -626,16 +642,17 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
- tipc_link_send_sync(l_ptr);
+ tipc_link_sync_xmit(l_ptr);
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
break;
case TIMEOUT_EVT:
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -721,11 +738,11 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
}
/*
- * tipc_link_send_buf() is the 'full path' for messages, called from
- * inside TIPC when the 'fast path' in tipc_send_buf
+ * tipc_link_xmit() is the 'full path' for messages, called from
+ * inside TIPC when the 'fast path' in tipc_send_xmit
* has failed, and from link_send()
*/
-int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
u32 size = msg_size(msg);
@@ -753,7 +770,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
/* Fragmentation needed ? */
if (size > max_packet)
- return link_send_long_buf(l_ptr, buf);
+ return tipc_link_frag_xmit(l_ptr, buf);
/* Packet can be queued or sent. */
if (likely(!link_congested(l_ptr))) {
@@ -797,11 +814,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
}
/*
- * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
- * not been selected yet, and the the owner node is not locked
+ * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
+ * has not been selected yet, and the owner node is not locked
* Called by TIPC internal users, e.g. the name distributor
*/
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
{
struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
@@ -813,7 +830,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
if (l_ptr)
- res = tipc_link_send_buf(l_ptr, buf);
+ res = __tipc_link_xmit(l_ptr, buf);
else
kfree_skb(buf);
tipc_node_unlock(n_ptr);
@@ -825,14 +842,14 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
}
/*
- * tipc_link_send_sync - synchronize broadcast link endpoints.
+ * tipc_link_sync_xmit - synchronize broadcast link endpoints.
*
* Give a newly added peer node the sequence number where it should
* start receiving and acking broadcast packets.
*
* Called with node locked
*/
-static void tipc_link_send_sync(struct tipc_link *l)
+static void tipc_link_sync_xmit(struct tipc_link *l)
{
struct sk_buff *buf;
struct tipc_msg *msg;
@@ -849,14 +866,14 @@ static void tipc_link_send_sync(struct tipc_link *l)
}
/*
- * tipc_link_recv_sync - synchronize broadcast link endpoints.
+ * tipc_link_sync_rcv - synchronize broadcast link endpoints.
* Receive the sequence number where we should start receiving and
* acking broadcast packets from a newly added peer node, and open
* up for reception of such packets.
*
* Called with node locked
*/
-static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
@@ -866,7 +883,7 @@ static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
}
/*
- * tipc_link_send_names - send name table entries to new neighbor
+ * tipc_link_names_xmit - send name table entries to new neighbor
*
* Send routine for bulk delivery of name table messages when contact
* with a new neighbor occurs. No link congestion checking is performed
* because name table messages *must* be delivered. The messages must be
* small enough not to require fragmentation.
* small enough not to require fragmentation.
* Called without any locks held.
*/
-void tipc_link_send_names(struct list_head *message_list, u32 dest)
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
{
struct tipc_node *n_ptr;
struct tipc_link *l_ptr;
@@ -909,13 +926,13 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
}
/*
- * link_send_buf_fast: Entry for data messages where the
+ * tipc_link_xmit_fast: Entry for data messages where the
* destination link is known and the header is complete,
* inclusive total message length. Very time critical.
* Link is locked. Returns user data length.
*/
-static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
- u32 *used_max_pkt)
+static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
+ u32 *used_max_pkt)
{
struct tipc_msg *msg = buf_msg(buf);
int res = msg_data_sz(msg);
@@ -931,18 +948,18 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
else
*used_max_pkt = l_ptr->max_pkt;
}
- return tipc_link_send_buf(l_ptr, buf); /* All other cases */
+ return __tipc_link_xmit(l_ptr, buf); /* All other cases */
}
/*
- * tipc_link_send_sections_fast: Entry for messages where the
+ * tipc_link_iovec_xmit_fast: Entry for messages where the
* destination processor is known and the header is complete,
* except for total message length.
* Returns user data length or errno.
*/
-int tipc_link_send_sections_fast(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destaddr)
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destaddr)
{
struct tipc_msg *hdr = &sender->phdr;
struct tipc_link *l_ptr;
@@ -968,8 +985,8 @@ again:
l_ptr = node->active_links[selector];
if (likely(l_ptr)) {
if (likely(buf)) {
- res = link_send_buf_fast(l_ptr, buf,
- &sender->max_pkt);
+ res = tipc_link_xmit_fast(l_ptr, buf,
+ &sender->max_pkt);
exit:
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
@@ -995,24 +1012,21 @@ exit:
if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
goto again;
- return link_send_sections_long(sender, msg_sect, len,
- destaddr);
+ return tipc_link_iovec_long_xmit(sender, msg_sect,
+ len, destaddr);
}
tipc_node_unlock(node);
}
read_unlock_bh(&tipc_net_lock);
/* Couldn't find a link to the destination node */
- if (buf)
- return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
- if (res >= 0)
- return tipc_port_reject_sections(sender, hdr, msg_sect,
- len, TIPC_ERR_NO_NODE);
- return res;
+ kfree_skb(buf);
+ tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
+ return -ENETUNREACH;
}
/*
- * link_send_sections_long(): Entry for long messages where the
+ * tipc_link_iovec_long_xmit(): Entry for long messages where the
* destination node is known and the header is complete,
* inclusive total message length.
* Link and bearer congestion status have been checked to be ok,
@@ -1025,9 +1039,9 @@ exit:
*
* Returns user data length or errno.
*/
-static int link_send_sections_long(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destaddr)
+static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destaddr)
{
struct tipc_link *l_ptr;
struct tipc_node *node;
@@ -1146,8 +1160,9 @@ error:
} else {
reject:
kfree_skb_list(buf_chain);
- return tipc_port_reject_sections(sender, hdr, msg_sect,
- len, TIPC_ERR_NO_NODE);
+ tipc_port_iovec_reject(sender, hdr, msg_sect, len,
+ TIPC_ERR_NO_NODE);
+ return -ENETUNREACH;
}
/* Append chain of fragments to send queue & send them */
@@ -1391,6 +1406,12 @@ static int link_recv_buf_validate(struct sk_buff *buf)
u32 hdr_size;
u32 min_hdr_size;
+ /* If this packet comes from the defer queue, the skb has already
+ * been validated
+ */
+ if (unlikely(TIPC_SKB_CB(buf)->deferred))
+ return 1;
+
if (unlikely(buf->len < MIN_H_SIZE))
return 0;
@@ -1435,7 +1456,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
u32 seq_no;
u32 ackd;
u32 released = 0;
- int type;
head = head->next;
buf->next = NULL;
@@ -1457,9 +1477,9 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
if (unlikely(msg_non_seq(msg))) {
if (msg_user(msg) == LINK_CONFIG)
- tipc_disc_recv_msg(buf, b_ptr);
+ tipc_disc_rcv(buf, b_ptr);
else
- tipc_bclink_recv_pkt(buf);
+ tipc_bclink_rcv(buf);
continue;
}
@@ -1483,7 +1503,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
msg_user(msg) == LINK_PROTOCOL &&
(msg_type(msg) == RESET_MSG ||
- msg_type(msg) == ACTIVATE_MSG) &&
+ msg_type(msg) == ACTIVATE_MSG) &&
!msg_redundant_link(msg))
n_ptr->block_setup &= ~WAIT_PEER_DOWN;
@@ -1502,7 +1522,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
while ((crs != l_ptr->next_out) &&
less_eq(buf_seqno(crs), ackd)) {
struct sk_buff *next = crs->next;
-
kfree_skb(crs);
crs = next;
released++;
@@ -1515,18 +1534,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Try sending any messages link endpoint has pending */
if (unlikely(l_ptr->next_out))
tipc_link_push_queue(l_ptr);
+
if (unlikely(!list_empty(&l_ptr->waiting_ports)))
tipc_link_wakeup_ports(l_ptr, 0);
+
if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
l_ptr->stats.sent_acks++;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
- /* Now (finally!) process the incoming message */
-protocol_check:
+ /* Process the incoming packet */
if (unlikely(!link_working_working(l_ptr))) {
if (msg_user(msg) == LINK_PROTOCOL) {
- link_recv_proto_msg(l_ptr, buf);
+ tipc_link_proto_rcv(l_ptr, buf);
head = link_insert_deferred_queue(l_ptr, head);
tipc_node_unlock(n_ptr);
continue;
@@ -1555,67 +1575,65 @@ protocol_check:
l_ptr->next_in_no++;
if (unlikely(l_ptr->oldest_deferred_in))
head = link_insert_deferred_queue(l_ptr, head);
-deliver:
- if (likely(msg_isdata(msg))) {
- tipc_node_unlock(n_ptr);
- tipc_port_recv_msg(buf);
- continue;
+
+ /* Deliver packet/message to correct user: */
+ if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) {
+ if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
+ msg = buf_msg(buf);
+ } else if (msg_user(msg) == MSG_FRAGMENTER) {
+ int rc;
+
+ l_ptr->stats.recv_fragments++;
+ rc = tipc_link_frag_rcv(&l_ptr->reasm_head,
+ &l_ptr->reasm_tail,
+ &buf);
+ if (rc == LINK_REASM_COMPLETE) {
+ l_ptr->stats.recv_fragmented++;
+ msg = buf_msg(buf);
+ } else {
+ if (rc == LINK_REASM_ERROR)
+ tipc_link_reset(l_ptr);
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
}
+
switch (msg_user(msg)) {
- int ret;
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ tipc_node_unlock(n_ptr);
+ tipc_port_rcv(buf);
+ continue;
case MSG_BUNDLER:
l_ptr->stats.recv_bundles++;
l_ptr->stats.recv_bundled += msg_msgcnt(msg);
tipc_node_unlock(n_ptr);
- tipc_link_recv_bundle(buf);
+ tipc_link_bundle_rcv(buf);
continue;
case NAME_DISTRIBUTOR:
n_ptr->bclink.recv_permitted = true;
tipc_node_unlock(n_ptr);
- tipc_named_recv(buf);
- continue;
- case BCAST_PROTOCOL:
- tipc_link_recv_sync(n_ptr, buf);
- tipc_node_unlock(n_ptr);
+ tipc_named_rcv(buf);
continue;
case CONN_MANAGER:
tipc_node_unlock(n_ptr);
- tipc_port_recv_proto_msg(buf);
- continue;
- case MSG_FRAGMENTER:
- l_ptr->stats.recv_fragments++;
- ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
- &l_ptr->reasm_tail,
- &buf);
- if (ret == LINK_REASM_COMPLETE) {
- l_ptr->stats.recv_fragmented++;
- msg = buf_msg(buf);
- goto deliver;
- }
- if (ret == LINK_REASM_ERROR)
- tipc_link_reset(l_ptr);
- tipc_node_unlock(n_ptr);
+ tipc_port_proto_rcv(buf);
continue;
- case CHANGEOVER_PROTOCOL:
- type = msg_type(msg);
- if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
- msg = buf_msg(buf);
- seq_no = msg_seqno(msg);
- if (type == ORIGINAL_MSG)
- goto deliver;
- goto protocol_check;
- }
+ case BCAST_PROTOCOL:
+ tipc_link_sync_rcv(n_ptr, buf);
break;
default:
kfree_skb(buf);
- buf = NULL;
break;
}
tipc_node_unlock(n_ptr);
- tipc_net_route_msg(buf);
continue;
unlock_discard:
-
tipc_node_unlock(n_ptr);
discard:
kfree_skb(buf);
@@ -1682,7 +1700,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
u32 seq_no = buf_seqno(buf);
if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
- link_recv_proto_msg(l_ptr, buf);
+ tipc_link_proto_rcv(l_ptr, buf);
return;
}
@@ -1703,8 +1721,9 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
&l_ptr->newest_deferred_in, buf)) {
l_ptr->deferred_inqueue_sz++;
l_ptr->stats.deferred_recv++;
+ TIPC_SKB_CB(buf)->deferred = true;
if ((l_ptr->deferred_inqueue_sz % 16) == 1)
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
} else
l_ptr->stats.duplicates++;
}
@@ -1712,9 +1731,8 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
/*
* Send protocol message to the other endpoint.
*/
-void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
- int probe_msg, u32 gap, u32 tolerance,
- u32 priority, u32 ack_mtu)
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
+ u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
struct sk_buff *buf = NULL;
struct tipc_msg *msg = l_ptr->pmsg;
@@ -1813,7 +1831,7 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest address rules
*/
-static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
{
u32 rec_gap = 0;
u32 max_pkt_info;
@@ -1932,8 +1950,8 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
msg_last_bcast(msg));
if (rec_gap || (msg_probe(msg))) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, rec_gap, 0, 0, max_pkt_ack);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
+ 0, max_pkt_ack);
}
if (msg_seq_gap(msg)) {
l_ptr->stats.recv_nacks++;
@@ -1972,7 +1990,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
}
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
- tipc_link_send_buf(tunnel, buf);
+ __tipc_link_xmit(tunnel, buf);
}
@@ -2005,7 +2023,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
if (buf) {
skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
msg_set_size(&tunnel_hdr, INT_H_SIZE);
- tipc_link_send_buf(tunnel, buf);
+ __tipc_link_xmit(tunnel, buf);
} else {
pr_warn("%sunable to send changeover msg\n",
link_co_err);
@@ -2039,7 +2057,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
}
}
-/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
+/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
* duplicate of the first link's send queue via the new link. This way, we
* are guaranteed that currently queued packets from a socket are delivered
* before future traffic from the same socket, even if this is using the
@@ -2048,7 +2066,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
* and sequence order is preserved per sender/receiver socket pair.
* Owner node is locked.
*/
-void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
struct tipc_link *tunnel)
{
struct sk_buff *iter;
@@ -2078,7 +2096,7 @@ void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
length);
- tipc_link_send_buf(tunnel, outbuf);
+ __tipc_link_xmit(tunnel, outbuf);
if (!tipc_link_is_up(l_ptr))
return;
iter = iter->next;
@@ -2105,89 +2123,114 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
return eb;
}
-/* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
- * via other link as result of a failover (ORIGINAL_MSG) or
- * a new active link (DUPLICATE_MSG). Failover packets are
- * returned to the active link for delivery upwards.
+
+
+/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
+ * Owner node is locked.
+ */
+static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
+ struct sk_buff *t_buf)
+{
+ struct sk_buff *buf;
+
+ if (!tipc_link_is_up(l_ptr))
+ return;
+
+ buf = buf_extract(t_buf, INT_H_SIZE);
+ if (buf == NULL) {
+ pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
+ return;
+ }
+
+ /* Add buffer to deferred queue, if applicable: */
+ link_handle_out_of_seq_msg(l_ptr, buf);
+}
+
+/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
* Owner node is locked.
*/
-static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
- struct sk_buff **buf)
+static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
+ struct sk_buff *t_buf)
{
- struct sk_buff *tunnel_buf = *buf;
- struct tipc_link *dest_link;
+ struct tipc_msg *t_msg = buf_msg(t_buf);
+ struct sk_buff *buf = NULL;
struct tipc_msg *msg;
- struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
- u32 msg_typ = msg_type(tunnel_msg);
- u32 msg_count = msg_msgcnt(tunnel_msg);
- u32 bearer_id = msg_bearer_id(tunnel_msg);
- if (bearer_id >= MAX_BEARERS)
- goto exit;
- dest_link = (*l_ptr)->owner->links[bearer_id];
- if (!dest_link)
- goto exit;
- if (dest_link == *l_ptr) {
- pr_err("Unexpected changeover message on link <%s>\n",
- (*l_ptr)->name);
- goto exit;
- }
- *l_ptr = dest_link;
- msg = msg_get_wrapped(tunnel_msg);
+ if (tipc_link_is_up(l_ptr))
+ tipc_link_reset(l_ptr);
- if (msg_typ == DUPLICATE_MSG) {
- if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
- goto exit;
- *buf = buf_extract(tunnel_buf, INT_H_SIZE);
- if (*buf == NULL) {
- pr_warn("%sduplicate msg dropped\n", link_co_err);
+ /* First failover packet? */
+ if (l_ptr->exp_msg_count == START_CHANGEOVER)
+ l_ptr->exp_msg_count = msg_msgcnt(t_msg);
+
+ /* Should there be an inner packet? */
+ if (l_ptr->exp_msg_count) {
+ l_ptr->exp_msg_count--;
+ buf = buf_extract(t_buf, INT_H_SIZE);
+ if (buf == NULL) {
+ pr_warn("%sno inner failover pkt\n", link_co_err);
goto exit;
}
- kfree_skb(tunnel_buf);
- return 1;
- }
+ msg = buf_msg(buf);
- /* First original message ?: */
- if (tipc_link_is_up(dest_link)) {
- pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
- dest_link->name);
- tipc_link_reset(dest_link);
- dest_link->exp_msg_count = msg_count;
- if (!msg_count)
- goto exit;
- } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
- dest_link->exp_msg_count = msg_count;
- if (!msg_count)
+ if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
+ kfree_skb(buf);
+ buf = NULL;
goto exit;
+ }
+ if (msg_user(msg) == MSG_FRAGMENTER) {
+ l_ptr->stats.recv_fragments++;
+ tipc_link_frag_rcv(&l_ptr->reasm_head,
+ &l_ptr->reasm_tail,
+ &buf);
+ }
}
+exit:
+ if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
+ tipc_node_detach_link(l_ptr->owner, l_ptr);
+ kfree(l_ptr);
+ }
+ return buf;
+}
+
+/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
+ * via other link as result of a failover (ORIGINAL_MSG) or
+ * a new active link (DUPLICATE_MSG). Failover packets are
+ * returned to the active link for delivery upwards.
+ * Owner node is locked.
+ */
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+ struct sk_buff **buf)
+{
+ struct sk_buff *t_buf = *buf;
+ struct tipc_link *l_ptr;
+ struct tipc_msg *t_msg = buf_msg(t_buf);
+ u32 bearer_id = msg_bearer_id(t_msg);
+
+ *buf = NULL;
- /* Receive original message */
- if (dest_link->exp_msg_count == 0) {
- pr_warn("%sgot too many tunnelled messages\n", link_co_err);
+ if (bearer_id >= MAX_BEARERS)
goto exit;
- }
- dest_link->exp_msg_count--;
- if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
+
+ l_ptr = n_ptr->links[bearer_id];
+ if (!l_ptr)
goto exit;
- } else {
- *buf = buf_extract(tunnel_buf, INT_H_SIZE);
- if (*buf != NULL) {
- kfree_skb(tunnel_buf);
- return 1;
- } else {
- pr_warn("%soriginal msg dropped\n", link_co_err);
- }
- }
+
+ if (msg_type(t_msg) == DUPLICATE_MSG)
+ tipc_link_dup_rcv(l_ptr, t_buf);
+ else if (msg_type(t_msg) == ORIGINAL_MSG)
+ *buf = tipc_link_failover_rcv(l_ptr, t_buf);
+ else
+ pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
- *buf = NULL;
- kfree_skb(tunnel_buf);
- return 0;
+ kfree_skb(t_buf);
+ return *buf != NULL;
}
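From the caller's side only the boolean result matters: a non-zero return from tipc_link_tunnel_rcv() means *buf now holds an extracted ORIGINAL_MSG payload to deliver, while zero means the tunnelled packet was fully consumed (a DUPLICATE_MSG routed through the deferred queue, or nothing left to deliver). This mirrors the hunk in tipc_rcv() earlier in this file:

/* Inside tipc_rcv()'s receive loop, node lock held: */
if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) {
	if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
		tipc_node_unlock(n_ptr);
		continue;		/* tunnel packet consumed */
	}
	msg = buf_msg(buf);		/* deliver the inner message */
}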
/*
* Bundler functionality:
*/
-void tipc_link_recv_bundle(struct sk_buff *buf)
+void tipc_link_bundle_rcv(struct sk_buff *buf)
{
u32 msgcount = msg_msgcnt(buf_msg(buf));
u32 pos = INT_H_SIZE;
@@ -2210,11 +2253,11 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
*/
/*
- * link_send_long_buf: Entry for buffers needing fragmentation.
+ * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
* The buffer is complete, inclusive total message length.
* Returns user data length.
*/
-static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct sk_buff *buf_chain = NULL;
struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
@@ -2277,12 +2320,11 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
return dsz;
}
-/*
- * tipc_link_recv_fragment(): Called with node lock on. Returns
+/* tipc_link_frag_rcv(): Called with node lock on. Returns
* the reassembled buffer if message is complete.
*/
-int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
- struct sk_buff **fbuf)
+int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
+ struct sk_buff **fbuf)
{
struct sk_buff *frag = *fbuf;
struct tipc_msg *msg = buf_msg(frag);
@@ -2296,6 +2338,7 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
goto out_free;
*head = frag;
skb_frag_list_init(*head);
+ *fbuf = NULL;
return 0;
} else if (*head &&
skb_try_coalesce(*head, frag, &headstolen, &delta)) {
@@ -2315,10 +2358,12 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
*tail = *head = NULL;
return LINK_REASM_COMPLETE;
}
+ *fbuf = NULL;
return 0;
out_free:
pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
kfree_skb(*fbuf);
+ *fbuf = NULL;
return LINK_REASM_ERROR;
}
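Note the ownership change: the fragment buffer passed in is always taken over by the reassembly code. On 0 and LINK_REASM_ERROR *fbuf is cleared, and on LINK_REASM_COMPLETE it refers to the reassembled message, so callers act purely on the return code. The pattern used by tipc_rcv() above:

/* Inside tipc_rcv()'s receive loop, node lock held: */
rc = tipc_link_frag_rcv(&l_ptr->reasm_head, &l_ptr->reasm_tail, &buf);
if (rc == LINK_REASM_COMPLETE) {
	l_ptr->stats.recv_fragmented++;
	msg = buf_msg(buf);		/* buf holds the reassembled message */
} else {
	if (rc == LINK_REASM_ERROR)
		tipc_link_reset(l_ptr);	/* reassembly state is unusable */
	tipc_node_unlock(n_ptr);
	continue;			/* 0: wait for more fragments */
}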
@@ -2352,35 +2397,40 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
-/**
- * link_find_link - locate link by name
- * @name: ptr to link name string
- * @node: ptr to area to be filled with ptr to associated node
- *
+/* tipc_link_find_owner - locate owner node of link by link's name
+ * @name: pointer to link name string
+ * @bearer_id: pointer to index in 'node->links' array where the link was found.
* Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
* this also prevents link deletion.
*
- * Returns pointer to link (or 0 if invalid link name).
+ * Returns pointer to node owning the link, or 0 if no matching link is found.
*/
-static struct tipc_link *link_find_link(const char *name,
- struct tipc_node **node)
+static struct tipc_node *tipc_link_find_owner(const char *link_name,
+ unsigned int *bearer_id)
{
struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
+ struct tipc_node *tmp_n_ptr;
+ struct tipc_node *found_node = 0;
+
int i;
- list_for_each_entry(n_ptr, &tipc_node_list, list) {
+ *bearer_id = 0;
+ list_for_each_entry_safe(n_ptr, tmp_n_ptr, &tipc_node_list, list) {
+ tipc_node_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
l_ptr = n_ptr->links[i];
- if (l_ptr && !strcmp(l_ptr->name, name))
- goto found;
+ if (l_ptr && !strcmp(l_ptr->name, link_name)) {
+ *bearer_id = i;
+ found_node = n_ptr;
+ break;
+ }
}
+ tipc_node_unlock(n_ptr);
+ if (found_node)
+ break;
}
- l_ptr = NULL;
- n_ptr = NULL;
-found:
- *node = n_ptr;
- return l_ptr;
+ return found_node;
}
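Callers that used to get a link pointer straight back from link_find_link() now receive the owning node plus a bearer index, and must re-check the slot under the node lock because the link may be gone by the time they look. A sketch fragment of the lookup pattern used by the config and statistics handlers further down:

unsigned int bearer_id;
struct tipc_node *node;
struct tipc_link *l_ptr;

node = tipc_link_find_owner(name, &bearer_id);
if (!node)
	return 0;			/* callers report "link not found" */

tipc_node_lock(node);
l_ptr = node->links[bearer_id];		/* may have vanished meanwhile */
if (l_ptr) {
	/* operate on the link, e.g. tipc_link_set_queue_limits() */
}
tipc_node_unlock(node);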
/**
@@ -2422,32 +2472,33 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
struct tipc_link *l_ptr;
struct tipc_bearer *b_ptr;
struct tipc_media *m_ptr;
+ int bearer_id;
int res = 0;
- l_ptr = link_find_link(name, &node);
- if (l_ptr) {
- /*
- * acquire node lock for tipc_link_send_proto_msg().
- * see "TIPC locking policy" in net.c.
- */
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (node) {
tipc_node_lock(node);
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- link_set_supervision_props(l_ptr, new_value);
- tipc_link_send_proto_msg(l_ptr,
- STATE_MSG, 0, 0, new_value, 0, 0);
- break;
- case TIPC_CMD_SET_LINK_PRI:
- l_ptr->priority = new_value;
- tipc_link_send_proto_msg(l_ptr,
- STATE_MSG, 0, 0, 0, new_value, 0);
- break;
- case TIPC_CMD_SET_LINK_WINDOW:
- tipc_link_set_queue_limits(l_ptr, new_value);
- break;
- default:
- res = -EINVAL;
- break;
+ l_ptr = node->links[bearer_id];
+
+ if (l_ptr) {
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ link_set_supervision_props(l_ptr, new_value);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+ new_value, 0, 0);
+ break;
+ case TIPC_CMD_SET_LINK_PRI:
+ l_ptr->priority = new_value;
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+ 0, new_value, 0);
+ break;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ tipc_link_set_queue_limits(l_ptr, new_value);
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
}
tipc_node_unlock(node);
return res;
@@ -2542,6 +2593,7 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
char *link_name;
struct tipc_link *l_ptr;
struct tipc_node *node;
+ unsigned int bearer_id;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -2552,15 +2604,19 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
return tipc_cfg_reply_error_string("link not found");
return tipc_cfg_reply_none();
}
-
read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(link_name, &node);
- if (!l_ptr) {
+ node = tipc_link_find_owner(link_name, &bearer_id);
+ if (!node) {
read_unlock_bh(&tipc_net_lock);
return tipc_cfg_reply_error_string("link not found");
}
-
tipc_node_lock(node);
+ l_ptr = node->links[bearer_id];
+ if (!l_ptr) {
+ tipc_node_unlock(node);
+ read_unlock_bh(&tipc_net_lock);
+ return tipc_cfg_reply_error_string("link not found");
+ }
link_reset_statistics(l_ptr);
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
@@ -2590,18 +2646,27 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
struct tipc_node *node;
char *status;
u32 profile_total = 0;
+ unsigned int bearer_id;
int ret;
if (!strcmp(name, tipc_bclink_name))
return tipc_bclink_stats(buf, buf_size);
read_lock_bh(&tipc_net_lock);
- l = link_find_link(name, &node);
- if (!l) {
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (!node) {
read_unlock_bh(&tipc_net_lock);
return 0;
}
tipc_node_lock(node);
+
+ l = node->links[bearer_id];
+ if (!l) {
+ tipc_node_unlock(node);
+ read_unlock_bh(&tipc_net_lock);
+ return 0;
+ }
+
s = &l->stats;
if (tipc_link_is_active(l))
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 3b6aa65b608c..8c0b49b5b2ee 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -1,7 +1,7 @@
/*
* net/tipc/link.h: Include file for TIPC link code
*
- * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 1995-2006, 2013, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -40,27 +40,28 @@
#include "msg.h"
#include "node.h"
-/*
- * Link reassembly status codes
+/* Link reassembly status codes
*/
#define LINK_REASM_ERROR -1
#define LINK_REASM_COMPLETE 1
-/*
- * Out-of-range value for link sequence numbers
+/* Out-of-range value for link sequence numbers
*/
#define INVALID_LINK_SEQ 0x10000
-/*
- * Link states
+/* Link working states
*/
#define WORKING_WORKING 560810u
#define WORKING_UNKNOWN 560811u
#define RESET_UNKNOWN 560812u
#define RESET_RESET 560813u
-/*
- * Starting value for maximum packet size negotiation on unicast links
+/* Link endpoint execution states
+ */
+#define LINK_STARTED 0x0001
+#define LINK_STOPPED 0x0002
+
+/* Starting value for maximum packet size negotiation on unicast links
* (unless bearer MTU is less)
*/
#define MAX_PKT_DEFAULT 1500
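The old started field becomes a small flags word so a second state, LINK_STOPPED, can mark a link that must stay quiescent while failover completes. Minimal usage, mirroring link_state_event() and tipc_link_delete_list() in link.c:

/* link_state_event(): */
if (l_ptr->flags & LINK_STOPPED)
	return;				/* link is being torn down */
if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
	return;				/* timer fired before STARTING_EVT */

/* on STARTING_EVT: */
l_ptr->flags |= LINK_STARTED;

/* tipc_link_delete_list(), when failover must finish first: */
l_ptr->flags |= LINK_STOPPED;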
@@ -102,8 +103,7 @@ struct tipc_stats {
* @media_addr: media address to use when sending messages over link
* @timer: link timer
* @owner: pointer to peer node
- * @link_list: adjacent links in bearer's list of links
- * @started: indicates if link has been started
+ * @flags: execution state flags for link endpoint instance
* @checkpoint: reference point for triggering link continuity checking
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
@@ -149,10 +149,9 @@ struct tipc_link {
struct tipc_media_addr media_addr;
struct timer_list timer;
struct tipc_node *owner;
- struct list_head link_list;
/* Management and link supervision data */
- int started;
+ unsigned int flags;
u32 checkpoint;
u32 peer_session;
u32 peer_bearer_id;
@@ -215,10 +214,9 @@ struct tipc_port;
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *l_ptr);
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down);
void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
- struct tipc_link *dest);
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
int tipc_link_is_up(struct tipc_link *l_ptr);
int tipc_link_is_active(struct tipc_link *l_ptr);
@@ -231,23 +229,24 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
int req_tlv_space);
void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
-void tipc_link_send_names(struct list_head *message_list, u32 dest);
+void tipc_link_reset_list(unsigned int bearer_id);
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest);
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_send_sections_fast(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destnode);
-void tipc_link_recv_bundle(struct sk_buff *buf);
-int tipc_link_recv_fragment(struct sk_buff **reasm_head,
- struct sk_buff **reasm_tail,
- struct sk_buff **fbuf);
-void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob,
- u32 gap, u32 tolerance, u32 priority,
- u32 acked_mtu);
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destnode);
+void tipc_link_bundle_rcv(struct sk_buff *buf);
+int tipc_link_frag_rcv(struct sk_buff **reasm_head,
+ struct sk_buff **reasm_tail,
+ struct sk_buff **fbuf);
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
+ u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
void tipc_link_push_queue(struct tipc_link *l_ptr);
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
- struct sk_buff *buf);
+ struct sk_buff *buf);
void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
void tipc_link_retransmit(struct tipc_link *l_ptr,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index e0d08055754e..893c49a3d98a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -138,7 +138,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
if (!buf_copy)
break;
msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
- tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+ tipc_link_xmit(buf_copy, n_ptr->addr, n_ptr->addr);
}
}
@@ -262,7 +262,7 @@ void tipc_named_node_up(unsigned long nodearg)
named_distribute(&message_list, node, &publ_zone, max_item_buf);
read_unlock_bh(&tipc_nametbl_lock);
- tipc_link_send_names(&message_list, node);
+ tipc_link_names_xmit(&message_list, node);
}
/**
@@ -293,9 +293,9 @@ static void named_purge_publ(struct publication *publ)
}
/**
- * tipc_named_recv - process name table update message sent by another node
+ * tipc_named_rcv - process name table update message sent by another node
*/
-void tipc_named_recv(struct sk_buff *buf)
+void tipc_named_rcv(struct sk_buff *buf)
{
struct publication *publ;
struct tipc_msg *msg = buf_msg(buf);
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 1e41bdd4f255..9b312ccfd43e 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -42,7 +42,7 @@
void tipc_named_publish(struct publication *publ);
void tipc_named_withdraw(struct publication *publ);
void tipc_named_node_up(unsigned long node);
-void tipc_named_recv(struct sk_buff *buf);
+void tipc_named_rcv(struct sk_buff *buf);
void tipc_named_reinit(void);
#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7d305ecc09c2..31b606e3916c 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -146,19 +146,19 @@ void tipc_net_route_msg(struct sk_buff *buf)
if (tipc_in_scope(dnode, tipc_own_addr)) {
if (msg_isdata(msg)) {
if (msg_mcast(msg))
- tipc_port_recv_mcast(buf, NULL);
+ tipc_port_mcast_rcv(buf, NULL);
else if (msg_destport(msg))
- tipc_port_recv_msg(buf);
+ tipc_port_rcv(buf);
else
net_route_named_msg(buf);
return;
}
switch (msg_user(msg)) {
case NAME_DISTRIBUTOR:
- tipc_named_recv(buf);
+ tipc_named_rcv(buf);
break;
case CONN_MANAGER:
- tipc_port_recv_proto_msg(buf);
+ tipc_port_proto_rcv(buf);
break;
default:
kfree_skb(buf);
@@ -168,7 +168,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
/* Handle message for another node */
skb_trim(buf, msg_size(msg));
- tipc_link_send(buf, dnode, msg_link_selector(msg));
+ tipc_link_xmit(buf, dnode, msg_link_selector(msg));
}
void tipc_net_start(u32 addr)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index efe4d41bf11b..0b0f6c7da965 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -162,7 +162,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
pr_info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
- tipc_link_dup_send_queue(active[0], l_ptr);
+ tipc_link_dup_queue_xmit(active[0], l_ptr);
if (l_ptr->priority == active[0]->priority) {
active[0] = l_ptr;
return;
@@ -249,9 +249,15 @@ void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- n_ptr->links[l_ptr->b_ptr->identity] = NULL;
- atomic_dec(&tipc_num_links);
- n_ptr->link_cnt--;
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (l_ptr != n_ptr->links[i])
+ continue;
+ n_ptr->links[i] = NULL;
+ atomic_dec(&tipc_num_links);
+ n_ptr->link_cnt--;
+ }
}
static void node_established_contact(struct tipc_node *n_ptr)
diff --git a/net/tipc/port.c b/net/tipc/port.c
index b742b2654525..c7c2b549a39e 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -87,10 +87,11 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
}
/**
- * tipc_multicast - send a multicast message to local and remote destinations
+ * tipc_port_mcast_xmit - send a multicast message to local and remote
+ * destinations
*/
-int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
- struct iovec const *msg_sect, unsigned int len)
+int tipc_port_mcast_xmit(u32 ref, struct tipc_name_seq const *seq,
+ struct iovec const *msg_sect, unsigned int len)
{
struct tipc_msg *hdr;
struct sk_buff *buf;
@@ -131,7 +132,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
return -ENOMEM;
}
}
- res = tipc_bclink_send_msg(buf);
+ res = tipc_bclink_xmit(buf);
if ((res < 0) && (dports.count != 0))
kfree_skb(ibuf);
} else {
@@ -140,7 +141,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
if (res >= 0) {
if (ibuf)
- tipc_port_recv_mcast(ibuf, &dports);
+ tipc_port_mcast_rcv(ibuf, &dports);
} else {
tipc_port_list_free(&dports);
}
@@ -148,11 +149,11 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
}
/**
- * tipc_port_recv_mcast - deliver multicast message to all destination ports
+ * tipc_port_mcast_rcv - deliver multicast message to all destination ports
*
* If there is no port list, perform a lookup to create one
*/
-void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
{
struct tipc_msg *msg;
struct tipc_port_list dports = {0, NULL, };
@@ -176,7 +177,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
msg_set_destnode(msg, tipc_own_addr);
if (dp->count == 1) {
msg_set_destport(msg, dp->ports[0]);
- tipc_port_recv_msg(buf);
+ tipc_port_rcv(buf);
tipc_port_list_free(dp);
return;
}
@@ -191,7 +192,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
if ((index == 0) && (cnt != 0))
item = item->next;
msg_set_destport(buf_msg(b), item->ports[index]);
- tipc_port_recv_msg(b);
+ tipc_port_rcv(b);
}
}
exit:
@@ -422,17 +423,17 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
/* send returned message & dispose of rejected message */
src_node = msg_prevnode(msg);
if (in_own_node(src_node))
- tipc_port_recv_msg(rbuf);
+ tipc_port_rcv(rbuf);
else
- tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
+ tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
exit:
kfree_skb(buf);
return data_sz;
}
-int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, unsigned int len,
- int err)
+int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
+ struct iovec const *msg_sect, unsigned int len,
+ int err)
{
struct sk_buff *buf;
int res;
@@ -519,7 +520,7 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 er
return buf;
}
-void tipc_port_recv_proto_msg(struct sk_buff *buf)
+void tipc_port_proto_rcv(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
struct tipc_port *p_ptr;
@@ -760,7 +761,7 @@ int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
return res;
}
-int tipc_connect(u32 ref, struct tipc_portid const *peer)
+int tipc_port_connect(u32 ref, struct tipc_portid const *peer)
{
struct tipc_port *p_ptr;
int res;
@@ -768,17 +769,17 @@ int tipc_connect(u32 ref, struct tipc_portid const *peer)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- res = __tipc_connect(ref, p_ptr, peer);
+ res = __tipc_port_connect(ref, p_ptr, peer);
tipc_port_unlock(p_ptr);
return res;
}
/*
- * __tipc_connect - connect to a remote peer
+ * __tipc_port_connect - connect to a remote peer
*
* Port must be locked.
*/
-int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
struct tipc_portid const *peer)
{
struct tipc_msg *msg;
@@ -815,7 +816,7 @@ exit:
*
* Port must be locked.
*/
-int __tipc_disconnect(struct tipc_port *tp_ptr)
+int __tipc_port_disconnect(struct tipc_port *tp_ptr)
{
if (tp_ptr->connected) {
tp_ptr->connected = 0;
@@ -828,10 +829,10 @@ int __tipc_disconnect(struct tipc_port *tp_ptr)
}
/*
- * tipc_disconnect(): Disconnect port form peer.
+ * tipc_port_disconnect(): Disconnect port from peer.
* This is a node local operation.
*/
-int tipc_disconnect(u32 ref)
+int tipc_port_disconnect(u32 ref)
{
struct tipc_port *p_ptr;
int res;
@@ -839,15 +840,15 @@ int tipc_disconnect(u32 ref)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- res = __tipc_disconnect(p_ptr);
+ res = __tipc_port_disconnect(p_ptr);
tipc_port_unlock(p_ptr);
return res;
}
/*
- * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
+ * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect
*/
-int tipc_shutdown(u32 ref)
+int tipc_port_shutdown(u32 ref)
{
struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
@@ -859,13 +860,13 @@ int tipc_shutdown(u32 ref)
buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
tipc_port_unlock(p_ptr);
tipc_net_route_msg(buf);
- return tipc_disconnect(ref);
+ return tipc_port_disconnect(ref);
}
/**
- * tipc_port_recv_msg - receive message from lower layer and deliver to port user
+ * tipc_port_rcv - receive message from lower layer and deliver to port user
*/
-int tipc_port_recv_msg(struct sk_buff *buf)
+int tipc_port_rcv(struct sk_buff *buf)
{
struct tipc_port *p_ptr;
struct tipc_msg *msg = buf_msg(buf);
@@ -894,19 +895,19 @@ int tipc_port_recv_msg(struct sk_buff *buf)
}
/*
- * tipc_port_recv_sections(): Concatenate and deliver sectioned
- * message for this node.
+ * tipc_port_iovec_rcv: Concatenate and deliver sectioned
+ * message for this node.
*/
-static int tipc_port_recv_sections(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len)
+static int tipc_port_iovec_rcv(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len)
{
struct sk_buff *buf;
int res;
res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (likely(buf))
- tipc_port_recv_msg(buf);
+ tipc_port_rcv(buf);
return res;
}
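The send helpers below all follow the same three-way split after the renames: deliver locally via tipc_port_iovec_rcv() when the destination is the own node, hand off to tipc_link_iovec_xmit_fast() for a remote node, and fall back to tipc_port_iovec_reject() when no node is reachable. Condensed from tipc_send2name()/tipc_send2port() below:

if (in_own_node(destnode))
	res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
else if (tipc_own_addr)
	res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len, destnode);
else
	res = tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
				     TIPC_ERR_NO_NODE);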
@@ -927,10 +928,10 @@ int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
if (!tipc_port_congested(p_ptr)) {
destnode = port_peernode(p_ptr);
if (likely(!in_own_node(destnode)))
- res = tipc_link_send_sections_fast(p_ptr, msg_sect,
- len, destnode);
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ destnode);
else
- res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
if (likely(res != -ELINKCONG)) {
p_ptr->congested = 0;
@@ -974,13 +975,13 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
if (likely(destport || destnode)) {
if (likely(in_own_node(destnode)))
- res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
else if (tipc_own_addr)
- res = tipc_link_send_sections_fast(p_ptr, msg_sect,
- len, destnode);
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ destnode);
else
- res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
- len, TIPC_ERR_NO_NODE);
+ res = tipc_port_iovec_reject(p_ptr, msg, msg_sect,
+ len, TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
p_ptr->sent++;
@@ -991,8 +992,8 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
}
return -ELINKCONG;
}
- return tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
- TIPC_ERR_NO_NAME);
+ return tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
+ TIPC_ERR_NO_NAME);
}
/**
@@ -1017,12 +1018,12 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
msg_set_hdr_sz(msg, BASIC_H_SIZE);
if (in_own_node(dest->node))
- res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
else if (tipc_own_addr)
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, len,
- dest->node);
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ dest->node);
else
- res = tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+ res = tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 34f12bd4074e..3ec3e94e4334 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -132,25 +132,25 @@ int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
-int tipc_connect(u32 portref, struct tipc_portid const *port);
+int tipc_port_connect(u32 portref, struct tipc_portid const *port);
-int tipc_disconnect(u32 portref);
+int tipc_port_disconnect(u32 portref);
-int tipc_shutdown(u32 ref);
+int tipc_port_shutdown(u32 ref);
/*
* The following routines require that the port be locked on entry
*/
-int __tipc_disconnect(struct tipc_port *tp_ptr);
-int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
+int __tipc_port_disconnect(struct tipc_port *tp_ptr);
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
struct tipc_portid const *peer);
int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
/*
* TIPC messaging routines
*/
-int tipc_port_recv_msg(struct sk_buff *buf);
+int tipc_port_rcv(struct sk_buff *buf);
int tipc_send(u32 portref, struct iovec const *msg_sect, unsigned int len);
int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
@@ -159,15 +159,15 @@ int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
int tipc_send2port(u32 portref, struct tipc_portid const *dest,
struct iovec const *msg_sect, unsigned int len);
-int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
- struct iovec const *msg, unsigned int len);
+int tipc_port_mcast_xmit(u32 portref, struct tipc_name_seq const *seq,
+ struct iovec const *msg, unsigned int len);
-int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, unsigned int len,
- int err);
+int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
+ struct iovec const *msg_sect, unsigned int len,
+ int err);
struct sk_buff *tipc_port_get_ports(void);
-void tipc_port_recv_proto_msg(struct sk_buff *buf);
-void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
+void tipc_port_proto_rcv(struct sk_buff *buf);
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp);
void tipc_port_reinit(void);
/**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index aab4948f0aff..fb885977bd2a 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -60,8 +60,8 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
static void wakeupdispatch(struct tipc_port *tport);
static void tipc_data_ready(struct sock *sk, int len);
static void tipc_write_space(struct sock *sk);
-static int release(struct socket *sock);
-static int accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_release(struct socket *sock);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -256,7 +256,7 @@ int tipc_sock_create_local(int type, struct socket **res)
*/
void tipc_sock_release_local(struct socket *sock)
{
- release(sock);
+ tipc_release(sock);
sock->ops = NULL;
sock_release(sock);
}
@@ -282,7 +282,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
if (ret < 0)
return ret;
- ret = accept(sock, *newsock, flags);
+ ret = tipc_accept(sock, *newsock, flags);
if (ret < 0) {
sock_release(*newsock);
return ret;
@@ -292,7 +292,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
}
/**
- * release - destroy a TIPC socket
+ * tipc_release - destroy a TIPC socket
* @sock: socket to destroy
*
* This routine cleans up any messages that are still queued on the socket.
@@ -307,7 +307,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
*
* Returns 0 on success, errno otherwise
*/
-static int release(struct socket *sock)
+static int tipc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tipc_port *tport;
@@ -338,7 +338,7 @@ static int release(struct socket *sock)
if ((sock->state == SS_CONNECTING) ||
(sock->state == SS_CONNECTED)) {
sock->state = SS_DISCONNECTING;
- tipc_disconnect(tport->ref);
+ tipc_port_disconnect(tport->ref);
}
tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
}
@@ -364,7 +364,7 @@ static int release(struct socket *sock)
}
/**
- * bind - associate or disassocate TIPC name(s) with a socket
+ * tipc_bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
* @uaddr: socket address describing name(s) and desired operation
* @uaddr_len: size of socket address data structure
@@ -378,7 +378,8 @@ static int release(struct socket *sock)
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
-static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
+static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
+ int uaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
@@ -423,7 +424,7 @@ exit:
}
/**
- * get_name - get port ID of socket or peer socket
+ * tipc_getname - get port ID of socket or peer socket
* @sock: socket structure
* @uaddr: area for returned socket address
* @uaddr_len: area for returned length of socket address
@@ -435,8 +436,8 @@ exit:
* accesses socket information that is unchanging (or which changes in
* a completely predictable manner).
*/
-static int get_name(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -463,7 +464,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
}
/**
- * poll - read and possibly block on pollmask
+ * tipc_poll - read and possibly block on pollmask
* @file: file structure associated with the socket
* @sock: socket for which to calculate the poll bits
* @wait: ???
@@ -502,8 +503,8 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-static unsigned int poll(struct file *file, struct socket *sock,
- poll_table *wait)
+static unsigned int tipc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
u32 mask = 0;
@@ -590,7 +591,7 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
}
/**
- * send_msg - send message in connectionless manner
+ * tipc_sendmsg - send message in connectionless manner
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
@@ -603,8 +604,8 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-static int send_msg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -671,10 +672,10 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
res = dest_name_check(dest, m);
if (res)
break;
- res = tipc_multicast(tport->ref,
- &dest->addr.nameseq,
- m->msg_iov,
- total_len);
+ res = tipc_port_mcast_xmit(tport->ref,
+ &dest->addr.nameseq,
+ m->msg_iov,
+ total_len);
}
if (likely(res != -ELINKCONG)) {
if (needs_conn && (res >= 0))
@@ -721,7 +722,7 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
}
/**
- * send_packet - send a connection-oriented message
+ * tipc_send_packet - send a connection-oriented message
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
@@ -731,8 +732,8 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-static int send_packet(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -742,7 +743,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
/* Handle implied connection establishment */
if (unlikely(dest))
- return send_msg(iocb, sock, m, total_len);
+ return tipc_sendmsg(iocb, sock, m, total_len);
if (total_len > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
@@ -774,7 +775,7 @@ exit:
}
/**
- * send_stream - send stream-oriented data
+ * tipc_send_stream - send stream-oriented data
* @iocb: (unused)
* @sock: socket structure
* @m: data to send
@@ -785,8 +786,8 @@ exit:
* Returns the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
-static int send_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -806,7 +807,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
/* Handle special cases where there is no connection */
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_UNCONNECTED)
- res = send_packet(NULL, sock, m, total_len);
+ res = tipc_send_packet(NULL, sock, m, total_len);
else
res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
goto exit;
@@ -851,7 +852,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
bytes_to_send = curr_left;
my_iov.iov_base = curr_start;
my_iov.iov_len = bytes_to_send;
- res = send_packet(NULL, sock, &my_msg, bytes_to_send);
+ res = tipc_send_packet(NULL, sock, &my_msg,
+ bytes_to_send);
if (res < 0) {
if (bytes_sent)
res = bytes_sent;
@@ -888,7 +890,7 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg)
if (!p_ptr)
return -EINVAL;
- __tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
+ __tipc_port_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
return -EINVAL;
@@ -1023,7 +1025,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
}
/**
- * recv_msg - receive packet-oriented message
+ * tipc_recvmsg - receive packet-oriented message
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
@@ -1034,8 +1036,8 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
*
* Returns size of returned message data, errno otherwise
*/
-static int recv_msg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1117,7 +1119,7 @@ exit:
}
/**
- * recv_stream - receive stream-oriented data
+ * tipc_recv_stream - receive stream-oriented data
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
@@ -1128,8 +1130,8 @@ exit:
*
* Returns size of returned message data, errno otherwise
*/
-static int recv_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1291,7 +1293,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
- __tipc_disconnect(tsock->p);
+ __tipc_port_disconnect(tsock->p);
}
retval = TIPC_OK;
}
@@ -1506,7 +1508,7 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
}
/**
- * connect - establish a connection to another TIPC port
+ * tipc_connect - establish a connection to another TIPC port
* @sock: socket structure
* @dest: socket address for destination port
* @destlen: size of socket address data structure
@@ -1514,8 +1516,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
*
* Returns 0 on success, errno otherwise
*/
-static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
- int flags)
+static int tipc_connect(struct socket *sock, struct sockaddr *dest,
+ int destlen, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
@@ -1556,7 +1558,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
if (!timeout)
m.msg_flags = MSG_DONTWAIT;
- res = send_msg(NULL, sock, &m, 0);
+ res = tipc_sendmsg(NULL, sock, &m, 0);
if ((res < 0) && (res != -EWOULDBLOCK))
goto exit;
@@ -1587,13 +1589,13 @@ exit:
}
/**
- * listen - allow socket to listen for incoming connections
+ * tipc_listen - allow socket to listen for incoming connections
* @sock: socket structure
* @len: (unused)
*
* Returns 0 on success, errno otherwise
*/
-static int listen(struct socket *sock, int len)
+static int tipc_listen(struct socket *sock, int len)
{
struct sock *sk = sock->sk;
int res;
@@ -1648,14 +1650,14 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
}
/**
- * accept - wait for connection request
+ * tipc_accept - wait for connection request
* @sock: listening socket
* @newsock: new socket that is to be connected
* @flags: file-related flags associated with socket
*
* Returns 0 on success, errno otherwise
*/
-static int accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
struct sock *new_sk, *sk = sock->sk;
struct sk_buff *buf;
@@ -1702,7 +1704,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
/* Connect new socket to its peer */
new_tsock->peer_name.ref = msg_origport(msg);
new_tsock->peer_name.node = msg_orignode(msg);
- tipc_connect(new_ref, &new_tsock->peer_name);
+ tipc_port_connect(new_ref, &new_tsock->peer_name);
new_sock->state = SS_CONNECTED;
tipc_set_portimportance(new_ref, msg_importance(msg));
@@ -1719,7 +1721,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
struct msghdr m = {NULL,};
advance_rx_queue(sk);
- send_packet(NULL, new_sock, &m, 0);
+ tipc_send_packet(NULL, new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
@@ -1733,7 +1735,7 @@ exit:
}
/**
- * shutdown - shutdown socket connection
+ * tipc_shutdown - shutdown socket connection
* @sock: socket structure
* @how: direction to close (must be SHUT_RDWR)
*
@@ -1741,7 +1743,7 @@ exit:
*
* Returns 0 on success, errno otherwise
*/
-static int shutdown(struct socket *sock, int how)
+static int tipc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1765,10 +1767,10 @@ restart:
kfree_skb(buf);
goto restart;
}
- tipc_disconnect(tport->ref);
+ tipc_port_disconnect(tport->ref);
tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
} else {
- tipc_shutdown(tport->ref);
+ tipc_port_shutdown(tport->ref);
}
sock->state = SS_DISCONNECTING;
@@ -1794,7 +1796,7 @@ restart:
}
/**
- * setsockopt - set socket option
+ * tipc_setsockopt - set socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
@@ -1806,8 +1808,8 @@ restart:
*
* Returns 0 on success, errno otherwise
*/
-static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
- unsigned int ol)
+static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
+ char __user *ov, unsigned int ol)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1853,7 +1855,7 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
}
/**
- * getsockopt - get socket option
+ * tipc_getsockopt - get socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
@@ -1865,8 +1867,8 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
*
* Returns 0 on success, errno otherwise
*/
-static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
- int __user *ol)
+static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
+ char __user *ov, int __user *ol)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1927,20 +1929,20 @@ static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
static const struct proto_ops msg_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
- .getname = get_name,
- .poll = poll,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_msg,
- .recvmsg = recv_msg,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_sendmsg,
+ .recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
@@ -1948,20 +1950,20 @@ static const struct proto_ops msg_ops = {
static const struct proto_ops packet_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
- .getname = get_name,
- .poll = poll,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
.ioctl = sock_no_ioctl,
- .listen = listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_packet,
- .recvmsg = recv_msg,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_send_packet,
+ .recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
@@ -1969,20 +1971,20 @@ static const struct proto_ops packet_ops = {
static const struct proto_ops stream_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
- .getname = get_name,
- .poll = poll,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
.ioctl = sock_no_ioctl,
- .listen = listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_stream,
- .recvmsg = recv_stream,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_send_stream,
+ .recvmsg = tipc_recv_stream,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 11ee4ed04f73..68602be07cc1 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -27,9 +27,10 @@ static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
err = rdev_stop_ap(rdev, dev);
if (!err) {
wdev->beacon_interval = 0;
- wdev->channel = NULL;
+ memset(&wdev->chandef, 0, sizeof(wdev->chandef));
wdev->ssid_len = 0;
rdev_set_qos_map(rdev, dev, NULL);
+ nl80211_send_ap_stopped(wdev);
}
return err;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 78559b5bbd1f..2d4268c5529d 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -642,7 +642,8 @@ int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
void
cfg80211_get_chan_state(struct wireless_dev *wdev,
struct ieee80211_channel **chan,
- enum cfg80211_chan_mode *chanmode)
+ enum cfg80211_chan_mode *chanmode,
+ u8 *radar_detect)
{
*chan = NULL;
*chanmode = CHAN_MODE_UNDEFINED;
@@ -660,6 +661,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
!wdev->ibss_dfs_possible)
? CHAN_MODE_SHARED
: CHAN_MODE_EXCLUSIVE;
+
+ /* consider worst-case - IBSS can try to return to the
+ * original user-specified channel as creator */
+ if (wdev->ibss_dfs_possible)
+ *radar_detect |= BIT(wdev->chandef.width);
return;
}
break;
@@ -674,17 +680,26 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (wdev->cac_started) {
- *chan = wdev->channel;
+ *chan = wdev->chandef.chan;
*chanmode = CHAN_MODE_SHARED;
+ *radar_detect |= BIT(wdev->chandef.width);
} else if (wdev->beacon_interval) {
- *chan = wdev->channel;
+ *chan = wdev->chandef.chan;
*chanmode = CHAN_MODE_SHARED;
+
+ if (cfg80211_chandef_dfs_required(wdev->wiphy,
+ &wdev->chandef))
+ *radar_detect |= BIT(wdev->chandef.width);
}
return;
case NL80211_IFTYPE_MESH_POINT:
if (wdev->mesh_id_len) {
- *chan = wdev->channel;
+ *chan = wdev->chandef.chan;
*chanmode = CHAN_MODE_SHARED;
+
+ if (cfg80211_chandef_dfs_required(wdev->wiphy,
+ &wdev->chandef))
+ *radar_detect |= BIT(wdev->chandef.width);
}
return;
case NL80211_IFTYPE_MONITOR:
@@ -701,6 +716,4 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
case NUM_NL80211_IFTYPES:
WARN_ON(1);
}
-
- return;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 010892b81a06..76ae6a605abb 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -737,7 +737,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
}
EXPORT_SYMBOL(cfg80211_unregister_wdev);
-static struct device_type wiphy_type = {
+static const struct device_type wiphy_type = {
.name = "wlan",
};
diff --git a/net/wireless/core.h b/net/wireless/core.h
index f1d193b557b6..40683004d523 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -211,6 +211,7 @@ struct cfg80211_event {
} dc;
struct {
u8 bssid[ETH_ALEN];
+ struct ieee80211_channel *channel;
} ij;
};
};
@@ -258,7 +259,8 @@ int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext);
int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext);
-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel);
int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
@@ -443,7 +445,8 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
void
cfg80211_get_chan_state(struct wireless_dev *wdev,
struct ieee80211_channel **chan,
- enum cfg80211_chan_mode *chanmode);
+ enum cfg80211_chan_mode *chanmode,
+ u8 *radar_detect);
int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index f911c5f9f903..1470b90e438f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -14,7 +14,8 @@
#include "rdev-ops.h"
-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_bss *bss;
@@ -28,8 +29,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
if (!wdev->ssid_len)
return;
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
- wdev->ssid, wdev->ssid_len,
+ bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
if (WARN_ON(!bss))
@@ -54,21 +54,26 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
#endif
}
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_event *ev;
unsigned long flags;
- trace_cfg80211_ibss_joined(dev, bssid);
+ trace_cfg80211_ibss_joined(dev, bssid, channel);
+
+ if (WARN_ON(!channel))
+ return;
ev = kzalloc(sizeof(*ev), gfp);
if (!ev)
return;
ev->type = EVENT_IBSS_JOINED;
- memcpy(ev->cr.bssid, bssid, ETH_ALEN);
+ memcpy(ev->ij.bssid, bssid, ETH_ALEN);
+ ev->ij.channel = channel;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
@@ -117,6 +122,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
wdev->ibss_fixed = params->channel_fixed;
wdev->ibss_dfs_possible = params->userspace_handles_dfs;
+ wdev->chandef = params->chandef;
#ifdef CONFIG_CFG80211_WEXT
wdev->wext.ibss.chandef = params->chandef;
#endif
@@ -200,6 +206,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
wdev->current_bss = NULL;
wdev->ssid_len = 0;
+ memset(&wdev->chandef, 0, sizeof(wdev->chandef));
#ifdef CONFIG_CFG80211_WEXT
if (!nowext)
wdev->wext.ibss.ssid_len = 0;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 885862447b63..d42a3fcb2f67 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -195,7 +195,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!err) {
memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
wdev->mesh_id_len = setup->mesh_id_len;
- wdev->channel = setup->chandef.chan;
+ wdev->chandef = setup->chandef;
}
return err;
@@ -244,7 +244,7 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
chandef->chan);
if (!err)
- wdev->channel = chandef->chan;
+ wdev->chandef = *chandef;
return err;
}
@@ -276,7 +276,7 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
err = rdev_leave_mesh(rdev, dev);
if (!err) {
wdev->mesh_id_len = 0;
- wdev->channel = NULL;
+ memset(&wdev->chandef, 0, sizeof(wdev->chandef));
rdev_set_qos_map(rdev, dev, NULL);
}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 52cca05044a8..d47c9d127b1e 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -772,7 +772,7 @@ void cfg80211_cac_event(struct net_device *netdev,
if (WARN_ON(!wdev->cac_started))
return;
- if (WARN_ON(!wdev->channel))
+ if (WARN_ON(!wdev->chandef.chan))
return;
switch (event) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4fe2e6e2bc76..8e6b6a2d35cb 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -382,6 +382,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
[NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
.len = IEEE80211_QOS_MAP_LEN_MAX },
+ [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
+ [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -855,6 +857,19 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
return 0;
}
+static struct ieee80211_channel *nl80211_get_valid_chan(struct wiphy *wiphy,
+ struct nlattr *tb)
+{
+ struct ieee80211_channel *chan;
+
+ if (tb == NULL)
+ return NULL;
+ chan = ieee80211_get_channel(wiphy, nla_get_u32(tb));
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+ return NULL;
+ return chan;
+}
+
static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
{
struct nlattr *nl_modes = nla_nest_start(msg, attr);
@@ -1586,6 +1601,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
(nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) ||
nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ)))
goto nla_put_failure;
+
+ if (dev->wiphy.max_ap_assoc_sta &&
+ nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
+ dev->wiphy.max_ap_assoc_sta))
+ goto nla_put_failure;
+
state->split_start++;
break;
case 11:
@@ -2035,10 +2056,12 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(nl_txq_params,
info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
rem_txq_params) {
- nla_parse(tb, NL80211_TXQ_ATTR_MAX,
- nla_data(nl_txq_params),
- nla_len(nl_txq_params),
- txq_params_policy);
+ result = nla_parse(tb, NL80211_TXQ_ATTR_MAX,
+ nla_data(nl_txq_params),
+ nla_len(nl_txq_params),
+ txq_params_policy);
+ if (result)
+ return result;
result = parse_txq_params(tb, &txq_params);
if (result)
return result;
@@ -3259,7 +3282,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!err) {
wdev->preset_chandef = params.chandef;
wdev->beacon_interval = params.beacon_interval;
- wdev->channel = params.chandef.chan;
+ wdev->chandef = params.chandef;
wdev->ssid_len = params.ssid_len;
memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
}
@@ -3902,8 +3925,8 @@ static struct net_device *get_vlan(struct genl_info *info,
return ERR_PTR(ret);
}
-static struct nla_policy
-nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+static const struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
[NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
};
@@ -4604,8 +4627,6 @@ static int parse_reg_rule(struct nlattr *tb[],
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_END])
return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
- return -EINVAL;
if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
return -EINVAL;
@@ -4615,8 +4636,9 @@ static int parse_reg_rule(struct nlattr *tb[],
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
freq_range->end_freq_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
- freq_range->max_bandwidth_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
+ if (tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
+ freq_range->max_bandwidth_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
power_rule->max_eirp =
nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
@@ -5086,6 +5108,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
const struct ieee80211_reg_rule *reg_rule;
const struct ieee80211_freq_range *freq_range;
const struct ieee80211_power_rule *power_rule;
+ unsigned int max_bandwidth_khz;
reg_rule = &regdom->reg_rules[i];
freq_range = &reg_rule->freq_range;
@@ -5095,6 +5118,11 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
if (!nl_reg_rule)
goto nla_put_failure_rcu;
+ max_bandwidth_khz = freq_range->max_bandwidth_khz;
+ if (!max_bandwidth_khz)
+ max_bandwidth_khz = reg_get_max_bandwidth(regdom,
+ reg_rule);
+
if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
reg_rule->flags) ||
nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
@@ -5102,7 +5130,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
freq_range->end_freq_khz) ||
nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
- freq_range->max_bandwidth_khz) ||
+ max_bandwidth_khz) ||
nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
power_rule->max_antenna_gain) ||
nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
@@ -5178,9 +5206,11 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
rem_reg_rules) {
- nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
- nla_data(nl_reg_rule), nla_len(nl_reg_rule),
- reg_rule_policy);
+ r = nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
+ nla_data(nl_reg_rule), nla_len(nl_reg_rule),
+ reg_rule_policy);
+ if (r)
+ goto bad_reg;
r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
if (r)
goto bad_reg;
@@ -5443,6 +5473,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
enum ieee80211_band band;
size_t ie_len;
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
+ s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_start)
@@ -5477,11 +5508,40 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (n_ssids > wiphy->max_sched_scan_ssids)
return -EINVAL;
- if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+ /*
+ * First, count the number of 'real' matchsets. Due to an issue with
+ * the old implementation, matchsets containing only the RSSI attribute
+ * (NL80211_SCHED_SCAN_MATCH_ATTR_RSSI) are considered as the 'default'
+ * RSSI for all matchsets, rather than their own matchset for reporting
+ * all APs with a strong RSSI. This is needed to be compatible with
+ * older userspace that treated a matchset with only the RSSI as the
+ * global RSSI for all other matchsets - if there are other matchsets.
+ */
+ if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
nla_for_each_nested(attr,
info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
- tmp)
- n_match_sets++;
+ tmp) {
+ struct nlattr *rssi;
+
+ err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_match_policy);
+ if (err)
+ return err;
+ /* add other standalone attributes here */
+ if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) {
+ n_match_sets++;
+ continue;
+ }
+ rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+ if (rssi)
+ default_match_rssi = nla_get_s32(rssi);
+ }
+ }
+
+ /* However, if there's no other matchset, add the RSSI one */
+ if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF)
+ n_match_sets = 1;
if (n_match_sets > wiphy->max_match_sets)
return -EINVAL;
@@ -5602,11 +5662,22 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
tmp) {
struct nlattr *ssid, *rssi;
- nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
- nla_data(attr), nla_len(attr),
- nl80211_match_policy);
+ err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_match_policy);
+ if (err)
+ goto out_free;
ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
if (ssid) {
+ if (WARN_ON(i >= n_match_sets)) {
+ /* this indicates a programming error,
+ * the loop above should have verified
+ * things properly
+ */
+ err = -EINVAL;
+ goto out_free;
+ }
+
if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
@@ -5615,15 +5686,28 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
nla_data(ssid), nla_len(ssid));
request->match_sets[i].ssid.ssid_len =
nla_len(ssid);
+ /* special attribute - old implementation w/a */
+ request->match_sets[i].rssi_thold =
+ default_match_rssi;
+ rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+ if (rssi)
+ request->match_sets[i].rssi_thold =
+ nla_get_s32(rssi);
}
- rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
- if (rssi)
- request->rssi_thold = nla_get_u32(rssi);
- else
- request->rssi_thold =
- NL80211_SCAN_RSSI_THOLD_OFF;
i++;
}
+
+ /* there was no other matchset, so the RSSI one is alone */
+ if (i == 0)
+ request->match_sets[0].rssi_thold = default_match_rssi;
+
+ request->min_rssi_thold = INT_MAX;
+ for (i = 0; i < n_match_sets; i++)
+ request->min_rssi_thold =
+ min(request->match_sets[i].rssi_thold,
+ request->min_rssi_thold);
+ } else {
+ request->min_rssi_thold = NL80211_SCAN_RSSI_THOLD_OFF;
}
if (info->attrs[NL80211_ATTR_IE]) {
@@ -5719,7 +5803,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef);
if (!err) {
- wdev->channel = chandef.chan;
+ wdev->chandef = chandef;
wdev->cac_started = true;
wdev->cac_start_time = jiffies;
}
@@ -5751,10 +5835,15 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
/* useless if AP is not running */
if (!wdev->beacon_interval)
- return -EINVAL;
+ return -ENOTCONN;
break;
case NL80211_IFTYPE_ADHOC:
+ if (!wdev->ssid_len)
+ return -ENOTCONN;
+ break;
case NL80211_IFTYPE_MESH_POINT:
+ if (!wdev->mesh_id_len)
+ return -ENOTCONN;
break;
default:
return -EOPNOTSUPP;
@@ -6192,9 +6281,9 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- chan = ieee80211_get_channel(&rdev->wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
+ chan = nl80211_get_valid_chan(&rdev->wiphy,
+ info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ if (!chan)
return -EINVAL;
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -6347,9 +6436,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- chan = ieee80211_get_channel(&rdev->wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
+ chan = nl80211_get_valid_chan(&rdev->wiphy,
+ info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ if (!chan)
return -EINVAL;
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -6985,6 +7074,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC])
connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ else if (info->attrs[NL80211_ATTR_MAC_HINT])
+ connect.bssid_hint =
+ nla_data(info->attrs[NL80211_ATTR_MAC_HINT]);
connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -7003,11 +7095,14 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
- connect.channel =
- ieee80211_get_channel(wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!connect.channel ||
- connect.channel->flags & IEEE80211_CHAN_DISABLED)
+ connect.channel = nl80211_get_valid_chan(
+ wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ if (!connect.channel)
+ return -EINVAL;
+ } else if (info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]) {
+ connect.channel_hint = nl80211_get_valid_chan(
+ wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]);
+ if (!connect.channel_hint)
return -EINVAL;
}
@@ -7421,6 +7516,7 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
[NL80211_TXRATE_HT] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_HT_RATES },
[NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+ [NL80211_TXRATE_GI] = { .type = NLA_U8 },
};
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
@@ -7467,16 +7563,19 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
* directly to the enum ieee80211_band values used in cfg80211.
*/
BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
- nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
- {
+ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
enum ieee80211_band band = nla_type(tx_rates);
+ int err;
+
if (band < 0 || band >= IEEE80211_NUM_BANDS)
return -EINVAL;
sband = rdev->wiphy.bands[band];
if (sband == NULL)
return -EINVAL;
- nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
- nla_len(tx_rates), nl80211_txattr_policy);
+ err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+ nla_len(tx_rates), nl80211_txattr_policy);
+ if (err)
+ return err;
if (tb[NL80211_TXRATE_LEGACY]) {
mask.control[band].legacy = rateset_to_mask(
sband,
@@ -7501,6 +7600,12 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
mask.control[band].vht_mcs))
return -EINVAL;
}
+ if (tb[NL80211_TXRATE_GI]) {
+ mask.control[band].gi =
+ nla_get_u8(tb[NL80211_TXRATE_GI]);
+ if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+ }
if (mask.control[band].legacy == 0) {
/* don't allow empty legacy rates if HT or VHT
@@ -7777,8 +7882,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
return err;
}
-static struct nla_policy
-nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
+static const struct nla_policy
+nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = {
[NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
@@ -11107,7 +11212,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
wdev->iftype != NL80211_IFTYPE_MESH_POINT))
return;
- wdev->channel = chandef->chan;
+ wdev->chandef = *chandef;
+ wdev->preset_chandef = *chandef;
nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
@@ -11621,6 +11727,35 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
}
EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
+void nl80211_send_ap_stopped(struct wireless_dev *wdev)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STOP_AP);
+ if (!hdr)
+ goto out;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+ goto out;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0,
+ NL80211_MCGRP_MLME, GFP_KERNEL);
+ return;
+ out:
+ nlmsg_free(msg);
+}
+
/* initialisation/exit functions */
int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 75799746d845..1e6df9630f42 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -74,6 +74,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
enum nl80211_radar_event event,
struct net_device *netdev, gfp_t gfp);
+void nl80211_send_ap_stopped(struct wireless_dev *wdev);
+
void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 9b897fca7487..27c5253e7a61 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -91,7 +91,7 @@ static struct regulatory_request __rcu *last_request =
/* To trigger userspace events */
static struct platform_device *reg_pdev;
-static struct device_type reg_device_type = {
+static const struct device_type reg_device_type = {
.uevent = reg_device_uevent,
};
@@ -522,6 +522,77 @@ bool reg_is_valid_request(const char *alpha2)
return alpha2_equal(lr->alpha2, alpha2);
}
+static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy)
+{
+ struct regulatory_request *lr = get_last_request();
+
+ /*
+ * Follow the driver's regulatory domain, if present, unless a country
+ * IE has been processed or a user wants to help complaince further
+ */
+ if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ lr->initiator != NL80211_REGDOM_SET_BY_USER &&
+ wiphy->regd)
+ return get_wiphy_regdom(wiphy);
+
+ return get_cfg80211_regdom();
+}
+
+unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
+ const struct ieee80211_reg_rule *rule)
+{
+ const struct ieee80211_freq_range *freq_range = &rule->freq_range;
+ const struct ieee80211_freq_range *freq_range_tmp;
+ const struct ieee80211_reg_rule *tmp;
+ u32 start_freq, end_freq, idx, no;
+
+ for (idx = 0; idx < rd->n_reg_rules; idx++)
+ if (rule == &rd->reg_rules[idx])
+ break;
+
+ if (idx == rd->n_reg_rules)
+ return 0;
+
+ /* get start_freq */
+ no = idx;
+
+ while (no) {
+ tmp = &rd->reg_rules[--no];
+ freq_range_tmp = &tmp->freq_range;
+
+ if (freq_range_tmp->end_freq_khz < freq_range->start_freq_khz)
+ break;
+
+ if (freq_range_tmp->max_bandwidth_khz)
+ break;
+
+ freq_range = freq_range_tmp;
+ }
+
+ start_freq = freq_range->start_freq_khz;
+
+ /* get end_freq */
+ freq_range = &rule->freq_range;
+ no = idx;
+
+ while (no < rd->n_reg_rules - 1) {
+ tmp = &rd->reg_rules[++no];
+ freq_range_tmp = &tmp->freq_range;
+
+ if (freq_range_tmp->start_freq_khz > freq_range->end_freq_khz)
+ break;
+
+ if (freq_range_tmp->max_bandwidth_khz)
+ break;
+
+ freq_range = freq_range_tmp;
+ }
+
+ end_freq = freq_range->end_freq_khz;
+
+ return end_freq - start_freq;
+}
+
/* Sanity check on a regulatory rule */
static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
{
@@ -630,7 +701,9 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
* Helper for regdom_intersect(), this does the real
* mathematical intersection fun
*/
-static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
+static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
+ const struct ieee80211_regdomain *rd2,
+ const struct ieee80211_reg_rule *rule1,
const struct ieee80211_reg_rule *rule2,
struct ieee80211_reg_rule *intersected_rule)
{
@@ -638,7 +711,7 @@ static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
struct ieee80211_freq_range *freq_range;
const struct ieee80211_power_rule *power_rule1, *power_rule2;
struct ieee80211_power_rule *power_rule;
- u32 freq_diff;
+ u32 freq_diff, max_bandwidth1, max_bandwidth2;
freq_range1 = &rule1->freq_range;
freq_range2 = &rule2->freq_range;
@@ -652,8 +725,24 @@ static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
freq_range2->start_freq_khz);
freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
freq_range2->end_freq_khz);
- freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz,
- freq_range2->max_bandwidth_khz);
+
+ max_bandwidth1 = freq_range1->max_bandwidth_khz;
+ max_bandwidth2 = freq_range2->max_bandwidth_khz;
+
+ /*
+ * In case max_bandwidth1 == 0 and max_bandwidth2 == 0 set
+ * output bandwidth as 0 (auto calculation). Next we will
+ * calculate this correctly in handle_channel function.
+ * In other case calculate output bandwidth here.
+ */
+ if (max_bandwidth1 || max_bandwidth2) {
+ if (!max_bandwidth1)
+ max_bandwidth1 = reg_get_max_bandwidth(rd1, rule1);
+ if (!max_bandwidth2)
+ max_bandwidth2 = reg_get_max_bandwidth(rd2, rule2);
+ }
+
+ freq_range->max_bandwidth_khz = min(max_bandwidth1, max_bandwidth2);
freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
if (freq_range->max_bandwidth_khz > freq_diff)
@@ -713,7 +802,8 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
rule1 = &rd1->reg_rules[x];
for (y = 0; y < rd2->n_reg_rules; y++) {
rule2 = &rd2->reg_rules[y];
- if (!reg_rules_intersect(rule1, rule2, &dummy_rule))
+ if (!reg_rules_intersect(rd1, rd2, rule1, rule2,
+ &dummy_rule))
num_rules++;
}
}
@@ -738,7 +828,8 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
* a memcpy()
*/
intersected_rule = &rd->reg_rules[rule_idx];
- r = reg_rules_intersect(rule1, rule2, intersected_rule);
+ r = reg_rules_intersect(rd1, rd2, rule1, rule2,
+ intersected_rule);
/*
* No need to memset the intersected rule here as
* we're not using the stack anymore
@@ -821,18 +912,8 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
u32 center_freq)
{
const struct ieee80211_regdomain *regd;
- struct regulatory_request *lr = get_last_request();
- /*
- * Follow the driver's regulatory domain, if present, unless a country
- * IE has been processed or a user wants to help complaince further
- */
- if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
- lr->initiator != NL80211_REGDOM_SET_BY_USER &&
- wiphy->regd)
- regd = get_wiphy_regdom(wiphy);
- else
- regd = get_cfg80211_regdom();
+ regd = reg_get_regdomain(wiphy);
return freq_reg_info_regd(wiphy, center_freq, regd);
}
@@ -903,6 +984,8 @@ static void handle_channel(struct wiphy *wiphy,
const struct ieee80211_freq_range *freq_range = NULL;
struct wiphy *request_wiphy = NULL;
struct regulatory_request *lr = get_last_request();
+ const struct ieee80211_regdomain *regd;
+ u32 max_bandwidth_khz;
request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
@@ -944,11 +1027,18 @@ static void handle_channel(struct wiphy *wiphy,
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
+ max_bandwidth_khz = freq_range->max_bandwidth_khz;
+ /* Check if auto calculation requested */
+ if (!max_bandwidth_khz) {
+ regd = reg_get_regdomain(wiphy);
+ max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+ }
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
bw_flags = IEEE80211_CHAN_NO_HT40;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
bw_flags |= IEEE80211_CHAN_NO_160MHZ;
if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
@@ -1334,6 +1424,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
+ u32 max_bandwidth_khz;
reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
regd);
@@ -1351,11 +1442,16 @@ static void handle_channel_custom(struct wiphy *wiphy,
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
+ max_bandwidth_khz = freq_range->max_bandwidth_khz;
+ /* Check if auto calculation requested */
+ if (!max_bandwidth_khz)
+ max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
bw_flags = IEEE80211_CHAN_NO_HT40;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
bw_flags |= IEEE80211_CHAN_NO_160MHZ;
chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
@@ -1683,17 +1779,9 @@ static void reg_process_hint(struct regulatory_request *reg_request)
struct wiphy *wiphy = NULL;
enum reg_request_treatment treatment;
- if (WARN_ON(!reg_request->alpha2))
- return;
-
if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
- if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && !wiphy) {
- kfree(reg_request);
- return;
- }
-
switch (reg_request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
reg_process_hint_core(reg_request);
@@ -1703,23 +1791,33 @@ static void reg_process_hint(struct regulatory_request *reg_request)
if (treatment == REG_REQ_OK ||
treatment == REG_REQ_ALREADY_SET)
return;
- schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
+ queue_delayed_work(system_power_efficient_wq,
+ &reg_timeout, msecs_to_jiffies(3142));
return;
case NL80211_REGDOM_SET_BY_DRIVER:
+ if (!wiphy)
+ goto out_free;
treatment = reg_process_hint_driver(wiphy, reg_request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ if (!wiphy)
+ goto out_free;
treatment = reg_process_hint_country_ie(wiphy, reg_request);
break;
default:
WARN(1, "invalid initiator %d\n", reg_request->initiator);
- return;
+ goto out_free;
}
/* This is required so that the orig_* parameters are saved */
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG)
wiphy_update_regulatory(wiphy, reg_request->initiator);
+
+ return;
+
+out_free:
+ kfree(reg_request);
}
/*
@@ -2147,6 +2245,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
+ char bw[32];
pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
@@ -2155,22 +2254,29 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
freq_range = &reg_rule->freq_range;
power_rule = &reg_rule->power_rule;
+ if (!freq_range->max_bandwidth_khz)
+ snprintf(bw, 32, "%d KHz, AUTO",
+ reg_get_max_bandwidth(rd, reg_rule));
+ else
+ snprintf(bw, 32, "%d KHz",
+ freq_range->max_bandwidth_khz);
+
/*
* There may not be documentation for max antenna gain
* in certain regions
*/
if (power_rule->max_antenna_gain)
- pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
- freq_range->max_bandwidth_khz,
+ bw,
power_rule->max_antenna_gain,
power_rule->max_eirp);
else
- pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %s), (N/A, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
- freq_range->max_bandwidth_khz,
+ bw,
power_rule->max_eirp);
}
}
@@ -2294,7 +2400,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
if (!request_wiphy) {
- schedule_delayed_work(&reg_timeout, 0);
+ queue_delayed_work(system_power_efficient_wq,
+ &reg_timeout, 0);
return -ENODEV;
}
@@ -2354,7 +2461,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
if (!request_wiphy) {
- schedule_delayed_work(&reg_timeout, 0);
+ queue_delayed_work(system_power_efficient_wq,
+ &reg_timeout, 0);
return -ENODEV;
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 02bd8f4b0921..18524617ab62 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -34,6 +34,8 @@ int __init regulatory_init(void);
void regulatory_exit(void);
int set_regdom(const struct ieee80211_regdomain *rd);
+unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
+ const struct ieee80211_reg_rule *rule);
bool reg_last_request_cell_base(void);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index fbcc23edee54..5eaeed59db07 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2278,11 +2278,6 @@ DECLARE_EVENT_CLASS(cfg80211_rx_evt,
TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
);
-DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr)
-);
-
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
TP_PROTO(struct net_device *netdev, const u8 *addr),
TP_ARGS(netdev, addr)
@@ -2293,6 +2288,24 @@ DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame,
TP_ARGS(netdev, addr)
);
+TRACE_EVENT(cfg80211_ibss_joined,
+ TP_PROTO(struct net_device *netdev, const u8 *bssid,
+ struct ieee80211_channel *channel),
+ TP_ARGS(netdev, bssid, channel),
+ TP_STRUCT__entry(
+ NETDEV_ENTRY
+ MAC_ENTRY(bssid)
+ CHAN_ENTRY
+ ),
+ TP_fast_assign(
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(bssid, bssid);
+ CHAN_ASSIGN(channel);
+ ),
+ TP_printk(NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", " CHAN_PR_FMT,
+ NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
+);
+
TRACE_EVENT(cfg80211_probe_status,
TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie,
bool acked),
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d39c37104ae2..780b4546c9c7 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -820,7 +820,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
ev->dc.reason, true);
break;
case EVENT_IBSS_JOINED:
- __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
+ __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
+ ev->ij.channel);
break;
}
wdev_unlock(wdev);
@@ -1356,7 +1357,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
*/
mutex_lock_nested(&wdev_iter->mtx, 1);
__acquire(wdev_iter->mtx);
- cfg80211_get_chan_state(wdev_iter, &ch, &chmode);
+ cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect);
wdev_unlock(wdev_iter);
switch (chmode) {
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 23708636b05c..25e5cb0aaef6 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -210,8 +210,8 @@ static void do_usb_entry(void *symval,
range_lo < 0x9 ? "[%X-9" : "[%X",
range_lo);
sprintf(alias + strlen(alias),
- range_hi > 0xA ? "a-%X]" : "%X]",
- range_lo);
+ range_hi > 0xA ? "A-%X]" : "%X]",
+ range_hi);
}
}
if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1))
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index ec4536c8d8d4..dafcf82139e2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -932,7 +932,7 @@ int snd_hda_bus_new(struct snd_card *card,
}
EXPORT_SYMBOL_GPL(snd_hda_bus_new);
-#ifdef CONFIG_SND_HDA_GENERIC
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
#define is_generic_config(codec) \
(codec->modelname && !strcmp(codec->modelname, "generic"))
#else
@@ -1339,23 +1339,15 @@ get_hda_cvt_setup(struct hda_codec *codec, hda_nid_t nid)
/*
* Dynamic symbol binding for the codec parsers
*/
-#ifdef MODULE
-#define load_parser_sym(sym) ((int (*)(struct hda_codec *))symbol_request(sym))
-#define unload_parser_addr(addr) symbol_put_addr(addr)
-#else
-#define load_parser_sym(sym) (sym)
-#define unload_parser_addr(addr) do {} while (0)
-#endif
#define load_parser(codec, sym) \
- ((codec)->parser = load_parser_sym(sym))
+ ((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym))
static void unload_parser(struct hda_codec *codec)
{
- if (codec->parser) {
- unload_parser_addr(codec->parser);
- codec->parser = NULL;
- }
+ if (codec->parser)
+ symbol_put_addr(codec->parser);
+ codec->parser = NULL;
}
/*
@@ -1570,7 +1562,7 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec)
EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets);
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
/* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */
static bool is_likely_hdmi_codec(struct hda_codec *codec)
{
@@ -1620,12 +1612,20 @@ int snd_hda_codec_configure(struct hda_codec *codec)
patch = codec->preset->patch;
if (!patch) {
unload_parser(codec); /* to be sure */
- if (is_likely_hdmi_codec(codec))
+ if (is_likely_hdmi_codec(codec)) {
+#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
patch = load_parser(codec, snd_hda_parse_hdmi_codec);
-#ifdef CONFIG_SND_HDA_GENERIC
- if (!patch)
+#elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI)
+ patch = snd_hda_parse_hdmi_codec;
+#endif
+ }
+ if (!patch) {
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
patch = load_parser(codec, snd_hda_parse_generic_codec);
+#elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC)
+ patch = snd_hda_parse_generic_codec;
#endif
+ }
if (!patch) {
printk(KERN_ERR "hda-codec: No codec parser is available\n");
return -ENODEV;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8321a97d5c05..d9a09bdd09db 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3269,7 +3269,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
mutex_unlock(&codec->control_mutex);
snd_hda_codec_flush_cache(codec); /* flush the updates */
if (err >= 0 && spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return err;
}
@@ -3390,7 +3390,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol,
return ret;
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return ret;
}
@@ -3795,7 +3795,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
return 0;
snd_hda_activate_path(codec, path, true, false);
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
path_power_down_sync(codec, old_path);
return 1;
}
@@ -5270,7 +5270,7 @@ static void init_input_src(struct hda_codec *codec)
}
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
}
/* set right pin controls for digital I/O */
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 07f767231c9f..c908afbe4d94 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -274,6 +274,7 @@ struct hda_gen_spec {
void (*init_hook)(struct hda_codec *codec);
void (*automute_hook)(struct hda_codec *codec);
void (*cap_sync_hook)(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
/* PCM hooks */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fa2879a21a50..e354ab1ec20f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -198,7 +198,7 @@ MODULE_DESCRIPTION("Intel HDA driver");
#endif
#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
#define SUPPORT_VGA_SWITCHEROO
#endif
#endif
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4e0ec146553d..bcf91bea3317 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3291,7 +3291,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec)
}
static void cxt_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
cxt_update_headset_mode(codec);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d9693ca9546f..a9a83b85517a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -708,7 +708,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
}
static void alc_inv_dmic_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_inv_dmic_sync(codec, false);
}
@@ -3218,7 +3219,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
/* turn on/off mic-mute LED per capture hook */
static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct alc_spec *spec = codec->spec;
unsigned int oldval = spec->gpio_led;
@@ -3528,7 +3530,8 @@ static void alc_update_headset_mode(struct hda_codec *codec)
}
static void alc_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_update_headset_mode(codec);
}
@@ -4329,6 +4332,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+ SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -4434,9 +4438,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
if (spec->codec_variant != ALC269_TYPE_ALC269VB)
return;
- /* ALC271X doesn't seem to support these COEFs (bko#52181) */
- if (!strcmp(codec->chip_name, "ALC271X"))
- return;
if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
alc_write_coef_idx(codec, 0xf, 0x960b);
@@ -5106,6 +5107,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6998cf29b9bc..7311badf6a94 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -194,7 +194,7 @@ struct sigmatel_spec {
int default_polarity;
unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */
- bool mic_mute_led_on; /* current mic mute state */
+ unsigned int mic_enabled; /* current mic mute state (bitmask) */
/* stream */
unsigned int stream_delay;
@@ -324,19 +324,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
/* hook for controlling mic-mute LED GPIO */
static void stac_capture_led_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct sigmatel_spec *spec = codec->spec;
- bool mute;
+ unsigned int mask;
+ bool cur_mute, prev_mute;
- if (!ucontrol)
+ if (!kcontrol || !ucontrol)
return;
- mute = !(ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1]);
- if (spec->mic_mute_led_on != mute) {
- spec->mic_mute_led_on = mute;
- if (mute)
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+ prev_mute = !spec->mic_enabled;
+ if (ucontrol->value.integer.value[0] ||
+ ucontrol->value.integer.value[1])
+ spec->mic_enabled |= mask;
+ else
+ spec->mic_enabled &= ~mask;
+ cur_mute = !spec->mic_enabled;
+ if (cur_mute != prev_mute) {
+ if (cur_mute)
spec->gpio_data |= spec->mic_mute_led_gpio;
else
spec->gpio_data &= ~spec->mic_mute_led_gpio;
@@ -4462,7 +4469,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
if (spec->mic_mute_led_gpio) {
spec->gpio_mask |= spec->mic_mute_led_gpio;
spec->gpio_dir |= spec->mic_mute_led_gpio;
- spec->mic_mute_led_on = true;
+ spec->mic_enabled = 0;
spec->gpio_data |= spec->mic_mute_led_gpio;
spec->gen.cap_sync_hook = stac_capture_led_hook;
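The patch_sigmatel.c hunk above switches the mic-mute LED bookkeeping from a single bool to a bitmask with one bit per capture switch, so the LED only signals "muted" once every switch is off. A minimal standalone sketch of that bookkeeping follows; the bit operations mirror the patched hook, but the function name, the global state, and the demo in main() are illustrative rather than the driver's code.

/*
 * Sketch of per-switch mute tracking: one bit per capture switch in
 * mic_enabled, LED state derived from "all bits clear". The return
 * value reports whether the all-muted state (and thus the LED) changed.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int mic_enabled;        /* bit per capture switch */

static bool update_mic_led(unsigned int idx, bool switch_on)
{
        bool prev_mute = !mic_enabled;
        bool cur_mute;

        if (switch_on)
                mic_enabled |= 1U << idx;
        else
                mic_enabled &= ~(1U << idx);
        cur_mute = !mic_enabled;
        return cur_mute != prev_mute;
}

int main(void)
{
        printf("%d\n", update_mic_led(0, true));   /* 1: no longer all-muted */
        printf("%d\n", update_mic_led(1, true));   /* 0: was already unmuted */
        printf("%d\n", update_mic_led(0, false));  /* 0: switch 1 still on */
        printf("%d\n", update_mic_led(1, false));  /* 1: back to all-muted */
        return 0;
}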
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 5799fbc24c28..8fe3b8c18ed4 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -39,6 +39,7 @@ static void update_tpacpi_mute_led(void *private_data, int enabled)
}
static void update_tpacpi_micmute_led(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
if (!ucontrol || !led_set_func)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index be456ce264d0..8ca405cd7c1a 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/uaccess.h>
#include <linux/irqchip/arm-gic.h>
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 88b2fe3ddf42..00d86427af0f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
list_add_tail(&dev->list, &kvm->coalesced_zones);
mutex_unlock(&kvm->slots_lock);
- return ret;
+ return 0;
out_free_dev:
mutex_unlock(&kvm->slots_lock);
-
kfree(dev);
- if (dev == NULL)
- return -ENXIO;
-
- return 0;
+ return ret;
}
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,